This file controls the operation of the bdflush kernel
daemon. The source code to this struct can be found in
linux/fs/buffer.c. It currently contains 9 integer values,
-of which 6 are actually used by the kernel.
+of which 4 are actually used by the kernel.
From linux/fs/buffer.c:
--------------------------------------------------------------
-union bdflush_param{
- struct {
- int nfract; /* Percentage of buffer cache dirty to
- activate bdflush */
- int ndirty; /* Maximum number of dirty blocks to
- write out per wake-cycle */
- int nrefill; /* Number of clean buffers to try to
- obtain each time we call refill */
- int nref_dirt; /* Dirty buffer threshold for activating
- bdflush when trying to refill buffers. */
- int dummy1; /* unused */
- int age_buffer; /* Time for normal buffer to age before
- we flush it */
- int age_super; /* Time for superblock to age before we
- flush it */
- int dummy2; /* unused */
- int dummy3; /* unused */
- } b_un;
- unsigned int data[N_PARAM];
-} bdf_prm = {{40, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};
+union bdflush_param {
+ struct {
+ int nfract; /* Percentage of buffer cache dirty to
+ activate bdflush */
+ int dummy1; /* old "ndirty" */
+ int dummy2; /* old "nrefill" */
+ int dummy3; /* unused */
+ int interval; /* jiffies delay between kupdate flushes */
+ int age_buffer; /* Time for normal buffer to age */
+ int nfract_sync;/* Percentage of buffer cache dirty to
+ activate bdflush synchronously */
+ int dummy4; /* unused */
+ int dummy5; /* unused */
+ } b_un;
+ unsigned int data[N_PARAM];
+} bdf_prm = {{30, 64, 64, 256, 5*HZ, 30*HZ, 60, 0, 0}};
--------------------------------------------------------------
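+
+Because bdflush_param is a union, the named b_un fields overlay
+the data[] array one-to-one; the array view is what lets the
+nine values be handled by index. A stand-alone sketch (not
+kernel code, it just re-declares the layout above with HZ=100
+folded in, so 5*HZ=500 and 30*HZ=3000):
+--------------------------------------------------------------
+#include <assert.h>
+
+union bdflush_param {
+	struct { int nfract, dummy1, dummy2, dummy3, interval,
+		 age_buffer, nfract_sync, dummy4, dummy5; } b_un;
+	unsigned int data[9];
+};
+
+int main(void)
+{
+	union bdflush_param p = {{30, 64, 64, 256, 500, 3000, 60, 0, 0}};
+
+	assert(p.data[0] == (unsigned int)p.b_un.nfract);      /* 30  */
+	assert(p.data[4] == (unsigned int)p.b_un.interval);    /* 500 */
+	assert(p.data[6] == (unsigned int)p.b_un.nfract_sync); /* 60  */
+	return 0;
+}
+--------------------------------------------------------------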
+int nfract:
The first parameter, nfract, governs the maximum percentage of
dirty buffers in the buffer cache. Dirty means that the contents
of the buffer still have to be written to disk (as opposed
to a clean buffer, which can just be forgotten about).
Setting this to a high value means that Linux can delay disk
writes for a long time, but it also means that it will have
to do a lot of I/O at once when memory becomes short. A low
-value will spread out disk I/O more evenly.
-
-The second parameter (ndirty) gives the maximum number of
-dirty buffers that bdflush can write to the disk in one time.
-A high value will mean delayed, bursty I/O, while a small
-value can lead to memory shortage when bdflush isn't woken
-up often enough...
-
-The third parameter (nrefill) is the number of buffers that
-bdflush will add to the list of free buffers when
-refill_freelist() is called. It is necessary to allocate free
-buffers beforehand, since the buffers often are of a different
-size than memory pages and some bookkeeping needs to be done
-beforehand. The higher the number, the more memory will be
-wasted and the less often refill_freelist() will need to run.
-
-When refill_freelist() comes across more than nref_dirt dirty
-buffers, it will wake up bdflush.
-
-Finally, the age_buffer and age_super parameters govern the
-maximum time Linux waits before writing out a dirty buffer
-to disk. The value is expressed in jiffies (clockticks), the
-number of jiffies per second is 100, except on Alpha machines
-(1024). Age_buffer is the maximum age for data blocks, while
-age_super is for filesystem metadata.
-
+value will spread out disk I/O more evenly, at the cost of
+more frequent I/O operations. The default value is 30%,
+the minimum is 0%, and the maximum is 100%.
+
+int interval:
+The fifth parameter, interval, is the minimum rate at
+which kupdate will wake and flush. The value is expressed in
+jiffies (clockticks); the number of jiffies per second is
+normally 100 (Alpha is 1024). Thus, x*HZ is x seconds. The
+default value is 5 seconds, the minimum is 0 seconds, and the
+maximum is 600 seconds.
+
+int age_buffer:
+The sixth parameter, age_buffer, governs the maximum time
+Linux waits before writing out a dirty buffer to disk. The
+value is in jiffies. The default value is 30 seconds,
+the minimum is 1 second, and the maximum is 6,000 seconds.
+
+int nfract_sync:
+The seventh parameter, nfract_sync, governs the percentage
+of buffer cache that is dirty before bdflush activates
+synchronously. This can be viewed as the hard limit before
+bdflush forces buffers to disk. The default is 60%, the
+minimum is 0%, and the maximum is 100%.
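+
+As a minimal illustration (not part of the kernel), all nine
+values can be set at run time by writing them, in data[] order,
+to /proc/sys/vm/bdflush as root. Assuming HZ=100 (so 5*HZ = 500
+and 30*HZ = 3000 jiffies), this sketch simply rewrites the
+defaults shown above:
+--------------------------------------------------------------
+#include <stdio.h>
+
+int main(void)
+{
+	FILE *f = fopen("/proc/sys/vm/bdflush", "w");
+
+	if (!f)
+		return 1;
+	/* nfract, 3 unused, interval, age_buffer, nfract_sync,
+	   2 unused */
+	fprintf(f, "30 64 64 256 500 3000 60 0 0\n");
+	return fclose(f) ? 1 : 0;
+}
+--------------------------------------------------------------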
+
==============================================================
buffermem:
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 10
-EXTRAVERSION =-pre7
+EXTRAVERSION =-pre8
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
static struct exec_domain arthur_exec_domain = {
"Arthur", /* name */
- (lcall7_func)arthur_lcall7,
+ arthur_lcall7,
PER_RISCOS, PER_RISCOS,
arthur_to_linux_signals,
linux_to_arthur_signals,
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
+#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/init.h>
case RH_GET_DESCRIPTOR:
switch ((wValue & 0xff00) >> 8) {
case (0x01): /* device descriptor */
- len = min(unsigned int, leni, min(unsigned int, sizeof (root_hub_dev_des), wLength));
+ len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_dev_des), wLength));
memcpy (data, root_hub_dev_des, len);
OK (len);
case (0x02): /* configuration descriptor */
- len = min(unsigned int, leni, min(unsigned int, sizeof (root_hub_config_des), wLength));
+ len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_config_des), wLength));
memcpy (data, root_hub_config_des, len);
OK (len);
case (0x03): /* string descriptors */
0xff, "ETRAX 100LX",
data, wLength);
if (len > 0) {
- OK(min(int, leni, len));
+ OK(min_t(int, leni, len));
} else
stat = -EPIPE;
}
case RH_GET_DESCRIPTOR | RH_CLASS:
root_hub_hub_des[2] = hc->rh.numports;
- len = min(unsigned int, leni, min(unsigned int, sizeof (root_hub_hub_des), wLength));
+ len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
memcpy (data, root_hub_hub_des, len);
OK (len);
EXPORT_SYMBOL(apm_info);
EXPORT_SYMBOL(gdt);
-#ifdef CONFIG_IO_DEBUG
+#ifdef CONFIG_DEBUG_IOVIRT
EXPORT_SYMBOL(__io_virt_debug);
#endif
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
+#include <linux/personality.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
obj-$(CONFIG_X86_USE_3DNOW) += mmx.o
obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
+obj-$(CONFIG_DEBUG_IOVIRT) += iodebug.o
include $(TOPDIR)/Rules.make
struct exec_domain solaris_exec_domain = {
"Solaris",
- (lcall7_func)NULL,
+ NULL,
1, 1, /* PER_SVR4 personality */
solaris_to_linux_signals,
linux_to_solaris_signals,
}
if (ctl_maxlen >= 0 && sock->pfirst) {
struct T_primsg *it = sock->pfirst;
- int l = min(int, ctl_maxlen, it->length);
+ int l = min_t(int, ctl_maxlen, it->length);
SCHECK_MAGIC((char*)((u64)(((char *)&it->type)+sock->offset+it->length+7)&~7),MKCTL_MAGIC);
		SOLD("putting ctl data");
if(copy_to_user(ctl_buf,
case BLKGETSIZE:
return put_user (mfm[minor].nr_sects, (long *)arg);
+ case BLKGETSIZE64:
+ return put_user ((u64)mfm[minor].nr_sects << 9, (u64 *)arg);
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
/*
* Allocate some buffer space, limited to half the buffer size
*/
- length = min(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2);
+ length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2);
if (length) {
host->dma.start_addr = address = host->dma.free_addr;
host->dma.free_addr = (host->dma.free_addr + length) &
/*
* Allocate some buffer space, limited to half the on-board RAM size
*/
- length = min(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2);
+ length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2);
if (length) {
host->dma.start_addr = address = host->dma.free_addr;
host->dma.free_addr = (host->dma.free_addr + length) &
* to be in operation AFTER the target leaves message out phase.
*/
acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
- period = max(unsigned int, message[3], sdtr_period / 4);
- length = min(unsigned int, message[4], sdtr_size);
+ period = max_t(unsigned int, message[3], sdtr_period / 4);
+ length = min_t(unsigned int, message[4], sdtr_size);
msgqueue_addmsg(&host->scsi.msgs, 5, EXTENDED_MESSAGE, 3,
EXTENDED_SDTR, period, length);
host->device[host->SCpnt->target].sync_xfer =
sizeof(DiskGeometry_T)) ? -EFAULT : 0);
case BLKGETSIZE:
/* Get Device Size. */
- if ((long *) Argument == NULL) return -EINVAL;
return put_user(Controller->GenericDiskInfo.part[MINOR(Inode->i_rdev)]
.nr_sects,
(long *) Argument);
+ case BLKGETSIZE64:
+ return put_user((u64)Controller->GenericDiskInfo.part[MINOR(Inode->i_rdev)].nr_sects << 9,
+ (u64 *) Argument);
case BLKRAGET:
case BLKRASET:
case BLKFLSBUF:
return put_user(acsi_part[MINOR(inode->i_rdev)].nr_sects,
(long *) arg);
+ case BLKGETSIZE64: /* Return device size */
+ return put_user((u64)acsi_part[MINOR(inode->i_rdev)].nr_sects << 9,
+ (u64 *) arg);
+
case BLKROSET:
case BLKROGET:
case BLKFLSBUF:
case BLKGETSIZE:
return put_user(unit[drive].blocks,(long *)param);
break;
+ case BLKGETSIZE64:
+ return put_user((u64)unit[drive].blocks << 9, (u64 *)param);
+ break;
case FDSETPRM:
case FDDEFPRM:
return -EINVAL;
case BLKGETSIZE:
/* Today get_gendisk() requires a linear scan;
add this when dev has pointer type. */
+ /* add BLKGETSIZE64 too */
g = get_gendisk(dev);
if (!g)
longval = 0;
put_user(hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect, &geo->start);
return 0;
case BLKGETSIZE:
- if (!arg) return -EINVAL;
put_user(hba[ctlr]->hd[MINOR(inode->i_rdev)].nr_sects, (long*)arg);
return 0;
+ case BLKGETSIZE64:
+ put_user((u64)hba[ctlr]->hd[MINOR(inode->i_rdev)].nr_sects << 9, (u64*)arg);
+ return 0;
case BLKRRPART:
return revalidate_logvol(inode->i_rdev, 1);
case BLKFLSBUF:
case IDAGETDRVINFO:
return copy_to_user(&io->c.drv,&hba[ctlr]->drv[dsk],sizeof(drv_info_t));
case BLKGETSIZE:
- if (!arg) return -EINVAL;
- put_user(ida[(ctlr<<CTLR_SHIFT)+MINOR(inode->i_rdev)].nr_sects, (long*)arg);
- return 0;
+ return put_user(ida[(ctlr<<CTLR_SHIFT)+MINOR(inode->i_rdev)].nr_sects, (long*)arg);
+ case BLKGETSIZE64:
+ return put_user((u64)(ida[(ctlr<<CTLR_SHIFT)+MINOR(inode->i_rdev)].nr_sects) << 9, (u64*)arg);
case BLKRRPART:
return revalidate_logvol(inode->i_rdev, 1);
case IDAPASSTHRU:
case BLKGETSIZE:
ECALL(get_floppy_geometry(drive, type, &g));
return put_user(g->size, (long *) param);
+
+ case BLKGETSIZE64:
+ ECALL(get_floppy_geometry(drive, type, &g));
+ return put_user((u64)g->size << 9, (u64 *) param);
/* BLKRRPART is not defined as floppies don't have
* partition tables */
}
err = -ENXIO;
break;
}
- if (!arg) {
- err = -EINVAL;
+ err = put_user(loop_sizes[lo->lo_number] << 1, (long *) arg);
+ break;
+ case BLKGETSIZE64:
+ if (lo->lo_state != Lo_bound) {
+ err = -ENXIO;
break;
}
- err = put_user(loop_sizes[lo->lo_number] << 1, (long *) arg);
+ err = put_user((u64)loop_sizes[lo->lo_number] << 10, (u64*)arg);
break;
case BLKBSZGET:
case BLKBSZSET:
#endif
case BLKGETSIZE:
return put_user(nbd_bytesizes[dev] >> 9, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)nbd_bytesizes[dev], (u64 *) arg);
}
return -EINVAL;
}
static struct hd_struct pd_hd[PD_DEVS];
static int pd_sizes[PD_DEVS];
static int pd_blocksizes[PD_DEVS];
+static int pd_maxsectors[PD_DEVS];
#define PD_NAMELEN 8
}
}
-static inline int pd_new_segment(request_queue_t *q, struct request *req, int max_segments)
-{
- if (max_segments > cluster)
- max_segments = cluster;
-
- if (req->nr_segments < max_segments) {
- req->nr_segments++;
- return 1;
- }
- return 0;
-}
-
-static int pd_back_merge_fn(request_queue_t *q, struct request *req,
- struct buffer_head *bh, int max_segments)
-{
- if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
- return 1;
- return pd_new_segment(q, req, max_segments);
-}
-
-static int pd_front_merge_fn(request_queue_t *q, struct request *req,
- struct buffer_head *bh, int max_segments)
-{
- if (bh->b_data + bh->b_size == req->bh->b_data)
- return 1;
- return pd_new_segment(q, req, max_segments);
-}
-
-static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
- struct request *next, int max_segments)
-{
- int total_segments = req->nr_segments + next->nr_segments;
- int same_segment;
-
- if (max_segments > cluster)
- max_segments = cluster;
-
- same_segment = 0;
- if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
- total_segments--;
- same_segment = 1;
- }
-
- if (total_segments > max_segments)
- return 0;
-
- req->nr_segments = total_segments;
- return 1;
-}
-
int pd_init (void)
{ int i;
}
q = BLK_DEFAULT_QUEUE(MAJOR_NR);
blk_init_queue(q, DEVICE_REQUEST);
- q->back_merge_fn = pd_back_merge_fn;
- q->front_merge_fn = pd_front_merge_fn;
- q->merge_requests_fn = pd_merge_requests_fn;
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
pd_gendisk.major = major;
for(i=0;i<PD_DEVS;i++) pd_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = pd_blocksizes;
+ for(i=0;i<PD_DEVS;i++) pd_maxsectors[i] = cluster;
+ max_sectors[MAJOR_NR] = pd_maxsectors;
+
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name,name,PD_VERSION,major,cluster,nice);
pd_init_units();
if (err) return (err);
put_user(pd_hd[dev].nr_sects,(long *) arg);
return (0);
+ case BLKGETSIZE64:
+ return put_user((u64)pd_hd[dev].nr_sects << 9, (u64 *)arg);
case BLKRRPART:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
devfs_unregister_blkdev(MAJOR_NR,name);
del_gendisk(&pd_gendisk);
+
+ for (unit=0;unit<PD_UNITS;unit++)
+ if (PD.present) pi_release(PI);
+
+ max_sectors[MAJOR_NR] = NULL;
}
#endif
put_user(0,(long *)&geo->start);
return 0;
case BLKGETSIZE:
- if (!arg) return -EINVAL;
- err = verify_area(VERIFY_WRITE,(long *) arg,sizeof(long));
- if (err) return (err);
- put_user(PF.capacity,(long *) arg);
- return (0);
+ return put_user(PF.capacity,(long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)PF.capacity << 9,(u64 *)arg);
case BLKROSET:
case BLKROGET:
case BLKRASET:
}
break;
+ case BLKGETSIZE64:
+ return put_user((u64)ps2esdi[MINOR(inode->i_rdev)].nr_sects << 9, (u64 *) arg);
+
case BLKRRPART:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!arg) return -EINVAL;
return put_user(rd_kbsize[minor] << 1, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)rd_kbsize[minor] << 10, (u64*)arg);
+
case BLKROSET:
case BLKROGET:
case BLKSSZGET:
case BLKGETSIZE:
if (!arg) return -EINVAL;
return put_user(xd_struct[MINOR(inode->i_rdev)].nr_sects,(long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)xd_struct[MINOR(inode->i_rdev)].nr_sects << 9, (u64 *)arg);
case HDIO_SET_DMA:
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
if (xdc_busy) return -EBUSY;
/* adjust RQ depth */
command =
((command & ~0xff000000) |
- min(u32, (mode & 0xff000000),
- min(u32, (command & 0xff000000),
+ min_t(u32, (mode & 0xff000000),
+ min_t(u32, (command & 0xff000000),
(scratch & 0xff000000))));
/* disable SBA if it's not supported */
/* adjust RQ depth */
command =
((command & ~0xff000000) |
- min(u32, (mode & 0xff000000),
- min(u32, (command & 0xff000000),
+ min_t(u32, (mode & 0xff000000),
+ min_t(u32, (command & 0xff000000),
(scratch & 0xff000000))));
/* disable SBA if it's not supported */
for performance, but because of buffer boundaries, there
may be several steps to the operation */
while(0 < (small_count =
- min(unsigned int, (rx_bufsize - new_rx_get),
- min(unsigned int, (TTY_FLIPBUF_SIZE - tty->flip.count), char_count))
+ min_t(unsigned int, (rx_bufsize - new_rx_get),
+ min_t(unsigned int, (TTY_FLIPBUF_SIZE - tty->flip.count), char_count))
)) {
memcpy_fromio(tty->flip.char_buf_ptr,
(char *)(cinfo->base_addr
}
#ifdef BLOCKMOVE
while(0 < (small_count =
- min(unsigned int, (tx_bufsize - tx_put),
- min(unsigned int, (SERIAL_XMIT_SIZE - info->xmit_tail),
- min(unsigned int, info->xmit_cnt, char_count))))) {
+ min_t(unsigned int, (tx_bufsize - tx_put),
+ min_t(unsigned int, (SERIAL_XMIT_SIZE - info->xmit_tail),
+ min_t(unsigned int, info->xmit_cnt, char_count))))) {
memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr + tx_put),
&info->xmit_buf[info->xmit_tail],
{ \
long i, t, m; \
while (count > 0) { \
- m = min(unsigned long, count, maxio); \
+ m = min_t(unsigned long, count, maxio); \
for (i = 0; i < m; i++) { \
for (t = 0; t < timeout && !ENABLE; t++) \
wait_some(HZ/50); \
return -EINVAL;
}
- kbuffer = kmalloc(min(unsigned int, count, PP_BUFFER_SIZE), GFP_KERNEL);
+ kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
if (!kbuffer) {
return -ENOMEM;
}
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
while (bytes_read < count) {
- ssize_t need = min(unsigned long, count - bytes_read, PP_BUFFER_SIZE);
+ ssize_t need = min_t(unsigned long, count - bytes_read, PP_BUFFER_SIZE);
if (mode == IEEE1284_MODE_EPP) {
/* various specials for EPP mode */
return -EINVAL;
}
- kbuffer = kmalloc(min(unsigned int, count, PP_BUFFER_SIZE), GFP_KERNEL);
+ kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
if (!kbuffer) {
return -ENOMEM;
}
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
while (bytes_written < count) {
- ssize_t n = min(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
+ ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
if (copy_from_user (kbuffer, buf + bytes_written, n)) {
bytes_written = -EFAULT;
** and available space.
*/
- transCount = min(unsigned int, PacketP->len & PKT_LEN_MASK,
+ transCount = min_t(unsigned int, PacketP->len & PKT_LEN_MASK,
TTY_FLIPBUF_SIZE - TtyP->flip.count);
rio_dprintk (RIO_DEBUG_REC, "port %d: Copy %d bytes\n",
PortP->PortNum, transCount);
switch (cmd) {
case BLKGETSIZE:
return put_user(i2ob[minor].nr_sects, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)i2ob[minor].nr_sects << 9, (u64 *)arg);
case HDIO_GETGEO:
{
}
case BLKGETSIZE: /* Return device size */
- if (!arg) return -EINVAL;
return put_user(hd[MINOR(inode->i_rdev)].nr_sects,
(long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)hd[MINOR(inode->i_rdev)].nr_sects << 9,
+ (u64 *) arg);
case BLKRRPART: /* Re-read partition tables */
if (!capable(CAP_SYS_ADMIN))
case BLKGETSIZE: /* Return device size */
return put_user(drive->part[MINOR(inode->i_rdev)&PARTN_MASK].nr_sects, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)drive->part[MINOR(inode->i_rdev)&PARTN_MASK].nr_sects << 9, (u64 *) arg);
case BLKRRPART: /* Re-read partition tables */
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
blksize_org = lvm_get_blksize(org_phys_dev);
blksize_snap = lvm_get_blksize(snap_phys_dev);
- max_blksize = max(int, blksize_org, blksize_snap);
- min_blksize = min(int, blksize_org, blksize_snap);
+ max_blksize = max(blksize_org, blksize_snap);
+ min_blksize = min(blksize_org, blksize_snap);
max_sectors = KIO_MAX_SECTORS * (min_blksize>>9);
if (chunk_size % (max_blksize>>9))
while (chunk_size)
{
- nr_sectors = min(int, chunk_size, max_sectors);
+ nr_sectors = min(chunk_size, max_sectors);
chunk_size -= nr_sectors;
iobuf->length = nr_sectors << 9;
buckets = lv->lv_remap_end;
max_buckets = calc_max_buckets();
- buckets = min(unsigned long, buckets, max_buckets);
+ buckets = min(buckets, max_buckets);
while (buckets & (buckets-1))
buckets &= (buckets-1);
return -EFAULT;
break;
+ case BLKGETSIZE64:
+ if (put_user((u64)lv_ptr->lv_size << 9, (u64 *)arg))
+ return -EFAULT;
+ break;
+
case BLKFLSBUF:
/* flush buffer cache */
(long *) arg);
goto done;
+ case BLKGETSIZE64: /* Return device size */
+ err = md_put_user((u64)md_hd_struct[minor].nr_sects << 9,
+ (u64 *) arg);
+ goto done;
+
case BLKRAGET:
case BLKRASET:
case BLKFLSBUF:
den = 0;
/* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
- for (i = min(int, deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
+ for (i = min_t(int, deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
if(lambda[i+1] != A0)
den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
}
put_user(ftl_hd[minor].start_sect, (u_long *)&geo->start);
break;
case BLKGETSIZE:
- ret = verify_area(VERIFY_WRITE, (long *)arg, sizeof(long));
- if (ret) return ret;
- put_user(ftl_hd[minor].nr_sects,
- (long *)arg);
+ ret = put_user(ftl_hd[minor].nr_sects, (long *)arg);
+ break;
+ case BLKGETSIZE64:
+ ret = put_user((u64)ftl_hd[minor].nr_sects << 9, (u64 *)arg);
break;
case BLKRRPART:
ret = ftl_reread_partitions(minor);
switch (cmd) {
case BLKGETSIZE: /* Return device size */
- if (!arg)
- return -EFAULT;
- return put_user((mtdblk->mtd->size >> 9),
- (long *) arg)?-EFAULT:0;
+ return put_user((mtdblk->mtd->size >> 9), (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)mtdblk->mtd->size, (u64 *)arg);
case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
switch (cmd) {
case BLKGETSIZE: /* Return device size */
- if (!arg) return -EFAULT;
- return Put_user((mtd->size >> 9),
- (long *) arg);
+ return put_user((mtd->size >> 9), (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)mtd->size, (u64 *)arg);
case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
return copy_to_user((void *)arg, &g, sizeof g) ? -EFAULT : 0;
}
case BLKGETSIZE: /* Return device size */
- if (!arg) return -EINVAL;
return put_user(part_table[MINOR(inode->i_rdev)].nr_sects,
(long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)part_table[MINOR(inode->i_rdev)].nr_sects << 9,
+ (u64 *)arg);
case BLKFLSBUF:
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
return;
while (size > 0) {
- tsize = min(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
- min(u32, size, ACE_WINDOW_SIZE));
+ tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+ min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (unsigned long)&regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
return;
while (size > 0) {
- tsize = min(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
- min(u32, size, ACE_WINDOW_SIZE));
+ tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+ min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (unsigned long)&regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
*/
if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf-2*DE600_MIN_WINDOW) return(0);
- amt = min(int, (sk->rcvbuf-atomic_read(&sk->rmem_alloc))/2/*-DE600_MIN_WINDOW*/, DE600_MAX_WINDOW);
+ amt = min_t(int, (sk->rcvbuf-atomic_read(&sk->rmem_alloc))/2/*-DE600_MIN_WINDOW*/, DE600_MAX_WINDOW);
if (amt < 0) return(0);
return(amt);
}
goto no_resources;
}
- amt = min(unsigned int, len, rbdp->size - count);
+ amt = min_t(unsigned int, len, rbdp->size - count);
memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt);
i += amt;
count += amt;
while (cnt > 0) {
switch (bc->hdlctx.state) {
case tx_keyup:
- i = min(int, cnt, bc->hdlctx.flags);
+ i = min_t(int, cnt, bc->hdlctx.flags);
cnt -= i;
bc->hdlctx.flags -= i;
if (bc->hdlctx.flags <= 0)
break;
}
}
- i = min(int, cnt, bc->hdlctx.bufcnt);
+ i = min_t(int, cnt, bc->hdlctx.bufcnt);
bc->hdlctx.bufcnt -= i;
cnt -= i;
if (i != pp->ops->epp_write_data(pp, bc->hdlctx.bufptr, i, 0))
bc->hdlctx.state = tx_data;
break;
}
- i = min(int, cnt, bc->hdlctx.flags);
+ i = min_t(int, cnt, bc->hdlctx.flags);
if (i) {
cnt -= i;
bc->hdlctx.flags -= i;
default: /* fall through */
if (bc->hdlctx.calibrate <= 0)
return 0;
- i = min(int, cnt, bc->hdlctx.calibrate);
+ i = min_t(int, cnt, bc->hdlctx.calibrate);
cnt -= i;
bc->hdlctx.calibrate -= i;
memset(tmp, 0, sizeof(tmp));
if (buffer_length >= len - offset) {
*eof = 1;
}
- return (min(int, buffer_length, len - offset));
+ return (min_t(int, buffer_length, len - offset));
}
if (!(page = (char *)__get_free_page(GFP_KERNEL))) {
return -ENOMEM;
}
- if(copy_from_user(page, buffer, count = (min(int, count, PAGE_SIZE))))
+ if(copy_from_user(page, buffer, count = (min_t(int, count, PAGE_SIZE))))
{
count = -EFAULT;
goto out;
len = sprintf(page, "external\n");
}
} else if (strcmp(file->name, FILENAME_FIRMWARE) == 0) {
- len = min(int, FILE_PAGESIZE,
- min(int, count,
+ len = min_t(int, FILE_PAGESIZE,
+ min_t(int, count,
hw->firmware ?
(hw->firmware->len - off) : 0));
if (len < 0) {
if (count >= len - off) {
*eof = 1;
}
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
/* Called on echo comx >boardtype */
if (count >= len - off) {
*eof = 1;
}
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
static int locomx_write_proc(struct file *file, const char *buffer,
return -ENOMEM;
}
- copy_from_user(page, buffer, count = min(unsigned long, count, PAGE_SIZE));
+ copy_from_user(page, buffer, count = min_t(unsigned long, count, PAGE_SIZE));
if (*(page + count - 1) == '\n') {
*(page + count - 1) = 0;
}
outsb(dev->base_addr + HSCX_FIFO,
- &(hw->sending->data[hw->tx_ptr]), min(unsigned int, to_send, 32));
+ &(hw->sending->data[hw->tx_ptr]), min_t(unsigned int, to_send, 32));
if (to_send <= 32) {
hscx_cmd(dev, HSCX_XTF | HSCX_XME);
kfree_skb(hw->sending);
}
*start = page + off;
if (count >= len - off) *eof = 1;
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
return -ENOMEM;
}
- copy_from_user(page, buffer, count = min(unsigned long, count, PAGE_SIZE));
+ copy_from_user(page, buffer, count = min_t(unsigned long, count, PAGE_SIZE));
if (*(page + count - 1) == '\n') {
*(page + count - 1) = 0;
}
*start = page + off;
if (count >= len - off) *eof = 1;
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
static int fr_write_proc(struct file *file, const char *buffer,
if (count >= len - off) {
*eof = 1;
}
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
static int comxlapb_write_proc(struct file *file, const char *buffer,
int free = (ch->debug_start - ch->debug_end + ch->debug_size)
% ch->debug_size;
- to_copy = min(int, free ? free : ch->debug_size,
- min(int, ch->debug_size - ch->debug_end, len));
+ to_copy = min_t(int, free ? free : ch->debug_size,
+ min_t(int, ch->debug_size - ch->debug_end, len));
memcpy(ch->debug_area + ch->debug_end, str, to_copy);
str += to_copy;
len -= to_copy;
if (count >= len - off) {
*eof = 1;
}
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
if (count >= len - off) {
*eof = 1;
}
- return min(int, count, len - off);
+ return min_t(int, count, len - off);
}
fullname, DRV_VERSION, DRV_RELEASE, copyright);
/* Verify number of cards and allocate adapter data space */
- ncards = min(int, ncards, MAX_CARDS);
- ncards = max(int, ncards, 1);
+ ncards = min_t(int, ncards, MAX_CARDS);
+ ncards = max_t(int, ncards, 1);
card_array = kmalloc(sizeof(cycx_t) * ncards, GFP_KERNEL);
if (!card_array)
goto out;
cfg.flags = 0; /* FIXME just reset the 2nd bit */
if (conf->u.x25.hi_pvc) {
- card->u.x.hi_pvc = min(unsigned int, conf->u.x25.hi_pvc, 4095);
- card->u.x.lo_pvc = min(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
+ card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095);
+ card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
}
if (conf->u.x25.hi_svc) {
- card->u.x.hi_svc = min(unsigned int, conf->u.x25.hi_svc, 4095);
- card->u.x.lo_svc = min(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
+ card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095);
+ card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
}
if (card->u.x.lo_pvc == 255)
cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
if (conf->u.x25.hdlc_window)
- cfg.n2win = min(unsigned int, conf->u.x25.hdlc_window, 7);
+ cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
if (conf->u.x25.pkt_window)
- cfg.n3win = min(unsigned int, conf->u.x25.pkt_window, 7);
+ cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7);
if (conf->u.x25.t1)
- cfg.t1 = min(unsigned int, conf->u.x25.t1, 30);
+ cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
if (conf->u.x25.t2)
- cfg.t2 = min(unsigned int, conf->u.x25.t2, 30);
+ cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30);
if (conf->u.x25.t11_t21)
- cfg.t21 = min(unsigned int, conf->u.x25.t11_t21, 30);
+ cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30);
if (conf->u.x25.t13_t23)
- cfg.t23 = min(unsigned int, conf->u.x25.t13_t23, 30);
+ cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30);
if (conf->u.x25.n2)
- cfg.n2 = min(unsigned int, conf->u.x25.n2, 30);
+ cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
/* initialize adapter */
if (x25_configure(card, &cfg))
struct net_local *nl = (struct net_local *) dev->priv;
struct sk_buff *skb = nl->tx_buf_p;
- unsigned len = min(unsigned int, skb->len - nl->outpos, nl->framelen);
+ unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
nl->outpos += nl->framelen;
if( --nl->tx_frameno )
- nl->framelen = min(unsigned int,
+ nl->framelen = min_t(unsigned int,
nl->maxframe,
nl->tx_buf_p->len - nl->outpos);
else
/* For Primary Port 0 */
card->wandev.mtu =
(conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
- min(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
+ min_t(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
CHDLC_DFLT_DATA_LEN;
} else if(port_num == WANOPT_SEC) {
/* For Secondary Port 1 */
card->wandev.mtu =
(conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
- min(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
+ min_t(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
CHDLC_DFLT_DATA_LEN;
}
card->u.c.kpalv_tx =
((conf->keepalive_tx_tmr - MIN_Tx_KPALV_TIMER)
>= 0) ?
- min(unsigned int, conf->keepalive_tx_tmr,MAX_Tx_KPALV_TIMER) :
+ min_t(unsigned int, conf->keepalive_tx_tmr,MAX_Tx_KPALV_TIMER) :
DEFAULT_Tx_KPALV_TIMER;
card->u.c.kpalv_rx =
((conf->keepalive_rx_tmr - MIN_Rx_KPALV_TIMER)
>= 0) ?
- min(unsigned int, conf->keepalive_rx_tmr,MAX_Rx_KPALV_TIMER) :
+ min_t(unsigned int, conf->keepalive_rx_tmr,MAX_Rx_KPALV_TIMER) :
DEFAULT_Rx_KPALV_TIMER;
card->u.c.kpalv_err =
((conf->keepalive_err_margin-MIN_KPALV_ERR_TOL)
>= 0) ?
- min(unsigned int, conf->keepalive_err_margin,
+ min_t(unsigned int, conf->keepalive_err_margin,
MAX_KPALV_ERR_TOL) :
DEFAULT_KPALV_ERR_TOL;
}
/* Setup slarp timer to control delay between slarps */
card->u.c.slarp_timer =
((conf->slarp_timer - MIN_SLARP_REQ_TIMER) >= 0) ?
- min(unsigned int, conf->slarp_timer, MAX_SLARP_REQ_TIMER) :
+ min_t(unsigned int, conf->slarp_timer, MAX_SLARP_REQ_TIMER) :
DEFAULT_SLARP_REQ_TIMER;
#ifdef LINUX_2_0
/* Adjust configuration */
conf->mtu += FR_HEADER_LEN;
conf->mtu = (conf->mtu >= MIN_LGTH_FR_DATA_CFG) ?
- min(unsigned int, conf->mtu, FR_MAX_NO_DATA_BYTES_IN_FRAME) :
+ min_t(unsigned int, conf->mtu, FR_MAX_NO_DATA_BYTES_IN_FRAME) :
FR_CHANNEL_MTU + FR_HEADER_LEN;
- conf->bps = min(unsigned int, conf->bps, 2048000);
+ conf->bps = min_t(unsigned int, conf->bps, 2048000);
	/* Initialize the configuration structure sent to the board to zero */
memset(&u.cfg, 0, sizeof(u.cfg));
* command in fr_configure() routine.
*/
- card->u.f.dlci_num = min(unsigned int, max(unsigned int, conf->u.fr.dlci_num, 1), 100);
+ card->u.f.dlci_num = min_t(unsigned int, max_t(unsigned int, conf->u.fr.dlci_num, 1), 100);
for ( i = 0; i < card->u.f.dlci_num; i++) {
u.cfg.port |= 0x0002;
if (conf->u.fr.t391)
- u.cfg.t391 = min(unsigned int, conf->u.fr.t391, 30);
+ u.cfg.t391 = min_t(unsigned int, conf->u.fr.t391, 30);
else
u.cfg.t391 = 5;
if (conf->u.fr.t392)
- u.cfg.t392 = min(unsigned int, conf->u.fr.t392, 30);
+ u.cfg.t392 = min_t(unsigned int, conf->u.fr.t392, 30);
else
u.cfg.t392 = 15;
if (conf->u.fr.n391)
- u.cfg.n391 = min(unsigned int, conf->u.fr.n391, 255);
+ u.cfg.n391 = min_t(unsigned int, conf->u.fr.n391, 255);
else
u.cfg.n391 = 2;
if (conf->u.fr.n392)
- u.cfg.n392 = min(unsigned int, conf->u.fr.n392, 10);
+ u.cfg.n392 = min_t(unsigned int, conf->u.fr.n392, 10);
else
u.cfg.n392 = 3;
if (conf->u.fr.n393)
- u.cfg.n393 = min(unsigned int, conf->u.fr.n393, 10);
+ u.cfg.n393 = min_t(unsigned int, conf->u.fr.n393, 10);
else
u.cfg.n393 = 4;
*/
if (conf->cir) {
- chan->cir = max(unsigned int, 1,
- min(unsigned int, conf->cir, 512));
+ chan->cir = max_t(unsigned int, 1,
+ min_t(unsigned int, conf->cir, 512));
chan->cir_status = CIR_ENABLED;
chan->bc = chan->cir;
if (conf->be){
- chan->be = max(unsigned int,
- 0, min(unsigned int, conf->be, 511));
+ chan->be = max_t(unsigned int,
+ 0, min_t(unsigned int, conf->be, 511));
}else{
conf->be = 0;
}
printk(KERN_INFO "%s: running PPP firmware v%s\n",card->devname, u.str);
/* Adjust configuration and set defaults */
card->wandev.mtu = (conf->mtu) ?
- min(unsigned int, conf->mtu, PPP_MAX_MTU) : PPP_DFLT_MTU;
+ min_t(unsigned int, conf->mtu, PPP_MAX_MTU) : PPP_DFLT_MTU;
card->wandev.bps = conf->bps;
card->wandev.interface = conf->interface;
dev->init = &if_init;
dev->priv = ppp_priv_area;
- dev->mtu = min(unsigned int, dev->mtu, card->wandev.mtu);
+ dev->mtu = min_t(unsigned int, dev->mtu, card->wandev.mtu);
/* Initialize the polling task routine */
#ifndef LINUX_2_4
u.cfg.defPktSize = u.cfg.pktMTU = card->wandev.mtu;
if (conf->u.x25.hi_pvc){
- card->u.x.hi_pvc = min(unsigned int, conf->u.x25.hi_pvc, MAX_LCN_NUM);
- card->u.x.lo_pvc = min(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
+ card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, MAX_LCN_NUM);
+ card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
}
if (conf->u.x25.hi_svc){
- card->u.x.hi_svc = min(unsigned int, conf->u.x25.hi_svc, MAX_LCN_NUM);
- card->u.x.lo_svc = min(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
+ card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, MAX_LCN_NUM);
+ card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
}
/* Figure out the total number of channels to configure */
u.cfg.hiTwoWaySVC = card->u.x.hi_svc;
if (conf->u.x25.hdlc_window)
- u.cfg.hdlcWindow = min(unsigned int, conf->u.x25.hdlc_window, 7);
+ u.cfg.hdlcWindow = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
if (conf->u.x25.pkt_window)
- u.cfg.pktWindow = min(unsigned int, conf->u.x25.pkt_window, 7);
+ u.cfg.pktWindow = min_t(unsigned int, conf->u.x25.pkt_window, 7);
if (conf->u.x25.t1)
- u.cfg.t1 = min(unsigned int, conf->u.x25.t1, 30);
+ u.cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
if (conf->u.x25.t2)
- u.cfg.t2 = min(unsigned int, conf->u.x25.t2, 29);
+ u.cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 29);
if (conf->u.x25.t4)
- u.cfg.t4 = min(unsigned int, conf->u.x25.t4, 240);
+ u.cfg.t4 = min_t(unsigned int, conf->u.x25.t4, 240);
if (conf->u.x25.n2)
- u.cfg.n2 = min(unsigned int, conf->u.x25.n2, 30);
+ u.cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
if (conf->u.x25.t10_t20)
- u.cfg.t10t20 = min(unsigned int, conf->u.x25.t10_t20,255);
+ u.cfg.t10t20 = min_t(unsigned int, conf->u.x25.t10_t20,255);
if (conf->u.x25.t11_t21)
- u.cfg.t11t21 = min(unsigned int, conf->u.x25.t11_t21,255);
+ u.cfg.t11t21 = min_t(unsigned int, conf->u.x25.t11_t21,255);
if (conf->u.x25.t12_t22)
- u.cfg.t12t22 = min(unsigned int, conf->u.x25.t12_t22,255);
+ u.cfg.t12t22 = min_t(unsigned int, conf->u.x25.t12_t22,255);
if (conf->u.x25.t13_t23)
- u.cfg.t13t23 = min(unsigned int, conf->u.x25.t13_t23,255);
+ u.cfg.t13t23 = min_t(unsigned int, conf->u.x25.t13_t23,255);
if (conf->u.x25.t16_t26)
- u.cfg.t16t26 = min(unsigned int, conf->u.x25.t16_t26, 255);
+ u.cfg.t16t26 = min_t(unsigned int, conf->u.x25.t16_t26, 255);
if (conf->u.x25.t28)
- u.cfg.t28 = min(unsigned int, conf->u.x25.t28, 255);
+ u.cfg.t28 = min_t(unsigned int, conf->u.x25.t28, 255);
if (conf->u.x25.r10_r20)
- u.cfg.r10r20 = min(unsigned int, conf->u.x25.r10_r20,250);
+ u.cfg.r10r20 = min_t(unsigned int, conf->u.x25.r10_r20,250);
if (conf->u.x25.r12_r22)
- u.cfg.r12r22 = min(unsigned int, conf->u.x25.r12_r22,250);
+ u.cfg.r12r22 = min_t(unsigned int, conf->u.x25.r12_r22,250);
if (conf->u.x25.r13_r23)
- u.cfg.r13r23 = min(unsigned int, conf->u.x25.r13_r23,250);
+ u.cfg.r13r23 = min_t(unsigned int, conf->u.x25.r13_r23,250);
if (conf->u.x25.ccitt_compat)
/* For Primary Port 0 */
card->wandev.mtu =
(conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
- min(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
+ min_t(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
CHDLC_DFLT_DATA_LEN;
} else if(port_num == WANOPT_SEC) {
/* For Secondary Port 1 */
card->wandev.mtu =
(conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
- min(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
+ min_t(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
CHDLC_DFLT_DATA_LEN;
}
// read the rid length field
bap_read(ai, pBuf, 2, BAP1);
// length for remaining part of rid
- len = min(unsigned int, len, le16_to_cpu(*(u16*)pBuf)) - 2;
+ len = min_t(unsigned int, len, le16_to_cpu(*(u16*)pBuf)) - 2;
if ( len <= 2 ) {
printk( KERN_ERR
*/
if (copy_to_user(comp->data, iobuf,
- min(unsigned int, comp->len, sizeof(iobuf))))
+ min_t(unsigned int, comp->len, sizeof(iobuf))))
return -EFAULT;
return 0;
}
PC4500_readrid(dev->priv,ridcode,iobuf,sizeof(iobuf));
if (copy_to_user(comp->data, iobuf,
- min(unsigned int, comp->len, sizeof(iobuf))))
+ min_t(unsigned int, comp->len, sizeof(iobuf))))
return -EFAULT;
return 0;
case BLKGETSIZE:{ /* Return device size */
long blocks = major_info->gendisk.sizes
[MINOR (inp->i_rdev)] << 1;
- rc =
- copy_to_user ((long *) data, &blocks,
- sizeof (long));
- if (rc)
- rc = -EFAULT;
+	rc = put_user(blocks, (long *)data);
+	break;
+	}
+	case BLKGETSIZE64:{
+	u64 blocks = major_info->gendisk.sizes
+	[MINOR (inp->i_rdev)];
+	rc = put_user(blocks << 10, (u64 *)data);
break;
}
case BLKRRPART:{
case BLKGETSIZE: /* 0x1260 */
/* Return the device size, expressed in sectors */
- if (!arg) return -EINVAL; /* NULL pointer: not valid */
- err= 0; /* verify_area_20(VERIFY_WRITE, (long *) arg, sizeof(long));
- * if (err) return err;
- */
- put_user ( 1024* xpram_sizes[MINOR(inode->i_rdev)]
+ return put_user( 1024* xpram_sizes[MINOR(inode->i_rdev)]
/ XPRAM_SOFTSECT,
(long *) arg);
- return 0;
+
+ case BLKGETSIZE64:
+ return put_user( (u64)(1024* xpram_sizes[MINOR(inode->i_rdev)]
+ / XPRAM_SOFTSECT) << 9,
+ (u64 *) arg);
case BLKFLSBUF: /* flush, 0x1261 */
fsync_dev(inode->i_rdev);
if (residual_buffer)
*residual_buffer = parm.ipbfadr1;
} else {
- moved = min(unsigned int, buflen, 8);
+ moved = min_t(unsigned int, buflen, 8);
memcpy ((char *) buffer,
(char *) &parm.ipbfadr1, moved);
while ((moved < 8) && (moved < buflen)) {
dyn_len =
- min(unsigned int,
+ min_t(unsigned int,
(buffer + i)->length, need_to_move);
memcpy ((char *)((ulong)((buffer + i)->address)),
memset (iucv_userid[devnumber], ' ', 8);
memcpy (iucv_userid[devnumber], userid,
- min(unsigned int, strlen(userid), 8));
+ min_t(unsigned int, strlen(userid), 8));
dev = &iucv_netdev[devnumber];
sprintf (dev->name, "iucv%i", devnumber);
switch (cmd) {
case BLKGETSIZE:
return put_user(jsfd_bytesizes[dev] >> 9, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user(jsfd_bytesizes[dev], (u64 *) arg);
#if 0
case BLKROSET:
IncStat(&cmd->SCp,1);
odd=FALSE;
}
- x=min(unsigned int,z,cmd->SCp.this_residual/2);
+ x=min_t(unsigned int,z,cmd->SCp.this_residual/2);
insw(base+HA_RDATA,cmd->SCp.ptr,x);
z-=x;
IncStat(&cmd->SCp,2*x);
z--;
odd=FALSE;
}
- x=min(unsigned int,z,cmd->SCp.this_residual/2);
+ x=min_t(unsigned int,z,cmd->SCp.this_residual/2);
outsw(base+HA_RDATA,cmd->SCp.ptr,x);
z-=x;
IncStat(&cmd->SCp,2*x);
static int *sd_sizes;
static int *sd_blocksizes;
static int *sd_hardsizes; /* Hardware sector size */
+static int *sd_max_sectors;
static int check_scsidisk_media_change(kdev_t);
static int fop_revalidate_scsidisk(kdev_t);
return 0;
}
case BLKGETSIZE: /* Return device size */
- if (!arg)
- return -EINVAL;
return put_user(sd[SD_PARTITION(inode->i_rdev)].nr_sects, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)sd[SD_PARTITION(inode->i_rdev)].nr_sects << 9, (u64 *)arg);
case BLKROSET:
case BLKROGET:
if (!sd_hardsizes)
goto cleanup_blocksizes;
+ sd_max_sectors = kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC);
+ if (!sd_max_sectors)
+ goto cleanup_max_sectors;
+
for (i = 0; i < sd_template.dev_max << 4; i++) {
sd_blocksizes[i] = 1024;
sd_hardsizes[i] = 512;
+ /*
+ * Allow lowlevel device drivers to generate 512k large scsi
+ * commands if they know what they're doing and they ask for it
+ * explicitly via the SHpnt->max_sectors API.
+ */
+ sd_max_sectors[i] = MAX_SEGMENTS*8;
}
for (i = 0; i < N_USED_SD_MAJORS; i++) {
blksize_size[SD_MAJOR(i)] = sd_blocksizes + i * (SCSI_DISKS_PER_MAJOR << 4);
hardsect_size[SD_MAJOR(i)] = sd_hardsizes + i * (SCSI_DISKS_PER_MAJOR << 4);
+ max_sectors[SD_MAJOR(i)] = sd_max_sectors + i * (SCSI_DISKS_PER_MAJOR << 4);
}
+ /*
+ * FIXME: should unregister blksize_size, hardsect_size and max_sectors when
+ * the module is unloaded.
+ */
sd = kmalloc((sd_template.dev_max << 4) *
sizeof(struct hd_struct),
GFP_ATOMIC);
cleanup_sd_gendisks:
kfree(sd);
cleanup_sd:
+ kfree(sd_max_sectors);
+cleanup_max_sectors:
kfree(sd_hardsizes);
cleanup_blocksizes:
kfree(sd_blocksizes);
switch (cmd) {
case BLKGETSIZE:
return put_user(scsi_CDs[target].capacity, (long *) arg);
+ case BLKGETSIZE64:
+ return put_user((u64)scsi_CDs[target].capacity << 9, (u64 *)arg);
case BLKROSET:
case BLKROGET:
case BLKRASET:
this_id: 7, \
sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
+ max_sectors: MAX_SEGMENTS*8, \
use_clustering: DISABLE_CLUSTERING}
#else
ssize_t count, used;
u_char *p = &frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft);
+ count = min_t(unsigned long, userCount, frameLeft);
if (dmasound.soft.stereo)
count &= ~1;
used = count;
ssize_t count, used;
void *p = &frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft);
+ count = min_t(unsigned long, userCount, frameLeft);
if (dmasound.soft.stereo)
count &= ~1;
used = count;
if (!dmasound.soft.stereo) {
u_char *p = &frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft);
+ count = min_t(unsigned long, userCount, frameLeft);
used = count;
while (count > 0) {
u_char data;
}
} else {
u_short *p = (u_short *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>1;
+ count = min_t(unsigned long, userCount, frameLeft)>>1;
used = count*2;
while (count > 0) {
u_short data;
if (!dmasound.soft.stereo) {
u_short *p = (u_short *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>1;
+ count = min_t(unsigned long, userCount, frameLeft)>>1;
used = count*2;
while (count > 0) {
u_short data;
*frameUsed += used*2;
} else {
void *p = (u_short *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft) & ~3;
+ count = min_t(unsigned long, userCount, frameLeft) & ~3;
used = count;
if (copy_from_user(p, userPtr, count))
return -EFAULT;
if (!dmasound.soft.stereo) {
u_short *p = (u_short *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>1;
+ count = min_t(unsigned long, userCount, frameLeft)>>1;
used = count*2;
while (count > 0) {
u_short data;
*frameUsed += used*2;
} else {
u_long *p = (u_long *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>2;
+ count = min_t(unsigned long, userCount, frameLeft)>>2;
used = count*4;
while (count > 0) {
u_long data;
count = frameLeft;
if (!dmasound.soft.stereo) {
u_short *p = (u_short *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>1;
+ count = min_t(unsigned long, userCount, frameLeft)>>1;
used = count*2;
while (count > 0) {
u_short data;
*frameUsed += used*2;
} else {
u_long *p = (u_long *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>2;
+ count = min_t(unsigned long, userCount, frameLeft)>>2;
used = count*4;
while (count > 0) {
u_long data;
count = frameLeft;
if (!dmasound.soft.stereo) {
u_short *p = (u_short *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>1;
+ count = min_t(unsigned long, userCount, frameLeft)>>1;
used = count*2;
while (count > 0) {
u_short data;
*frameUsed += used*2;
} else {
u_long *p = (u_long *)&frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft)>>2;
+ count = min_t(unsigned long, userCount, frameLeft)>>2;
used = count;
while (count > 0) {
u_long data;
frameLeft >>= 2;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
u_char data;
if (get_user(data, userPtr++))
frameLeft >>= 2;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
u_char data;
if (get_user(data, userPtr++))
frameLeft >>= 2;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
u_char data;
if (get_user(data, userPtr++))
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
if (!stereo) {
short *up = (short *) userPtr;
while (count > 0) {
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
int data;
if (get_user(data, up++))
frameLeft >>= 2;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
u_char data;
frameLeft >>= 2;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
u_char data;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
if (!stereo) {
short *up = (short *) userPtr;
while (count > 0) {
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(unsigned long, userCount, frameLeft);
while (count > 0) {
int data;
if (!dmasound.soft.stereo) {
void *p = &frame[*frameUsed];
- count = min(unsigned long, userCount, frameLeft) & ~1;
+ count = min_t(unsigned long, userCount, frameLeft) & ~1;
used = count;
if (copy_from_user(p, userPtr, count))
return -EFAULT;
} else {
u_char *left = &frame[*frameUsed>>1];
u_char *right = left+write_sq_block_size_half;
- count = min(unsigned long, userCount, frameLeft)>>1 & ~1;
+ count = min_t(unsigned long, userCount, frameLeft)>>1 & ~1;
used = count*2;
while (count > 0) {
if (get_user(*left++, userPtr++)
\
if (!dmasound.soft.stereo) { \
u_char *p = &frame[*frameUsed]; \
- count = min(unsigned long, userCount, frameLeft) & ~1; \
+ count = min_t(size_t, userCount, frameLeft) & ~1; \
used = count; \
while (count > 0) { \
u_char data; \
} else { \
u_char *left = &frame[*frameUsed>>1]; \
u_char *right = left+write_sq_block_size_half; \
- count = min(unsigned long, userCount, frameLeft)>>1 & ~1;\
+ count = min_t(size_t, userCount, frameLeft)>>1 & ~1; \
used = count*2; \
while (count > 0) { \
u_char data; \
if (!dmasound.soft.stereo) { \
u_char *high = &frame[*frameUsed>>1]; \
u_char *low = high+write_sq_block_size_half; \
- count = min(unsigned long, userCount, frameLeft)>>1 & ~1;\
+ count = min_t(size_t, userCount, frameLeft)>>1 & ~1; \
used = count*2; \
while (count > 0) { \
if (get_user(data, ((u_short *)userPtr)++)) \
u_char *leftl = lefth+write_sq_block_size_quarter; \
u_char *righth = lefth+write_sq_block_size_half; \
u_char *rightl = righth+write_sq_block_size_quarter; \
- count = min(unsigned long, userCount, frameLeft)>>2 & ~1;\
+ count = min_t(size_t, userCount, frameLeft)>>2 & ~1; \
used = count*4; \
while (count > 0) { \
if (get_user(data, ((u_short *)userPtr)++)) \
ssize_t count, used;
u_char *p = (u_char *) &frame[*frameUsed];
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(size_t, userCount, frameLeft);
if (copy_from_user(p,userPtr,count))
return -EFAULT;
while (count > 0) {
frameLeft >>= 1;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(size_t, userCount, frameLeft);
while (count > 0) {
u_char data;
if (get_user(data, userPtr++))
ssize_t count, used;
u_char *p = (u_char *) &frame[*frameUsed];
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(size_t, userCount, frameLeft);
if (copy_from_user(p,userPtr,count))
return -EFAULT;
while (count > 0) {
frameLeft >>= 1;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(size_t, userCount, frameLeft);
while (count > 0) {
u_char data;
if (get_user(data, userPtr++))
ssize_t count, used;
u_char *p = (u_char *) &frame[*frameUsed];
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(size_t, userCount, frameLeft);
if (copy_from_user(p,userPtr,count))
return -EFAULT;
*frameUsed += used;
frameLeft >>= 1;
if (stereo)
userCount >>= 1;
- used = count = min(unsigned long, userCount, frameLeft);
+ used = count = min_t(size_t, userCount, frameLeft);
while (count > 0) {
u_char data;
if (get_user(data, userPtr++))
if ((bytestocopy >= wiinst->buffer.fragment_size)
|| (bytestocopy >= count)) {
- bytestocopy = min(u32, bytestocopy, count);
+ bytestocopy = min_t(u32, bytestocopy, count);
emu10k1_wavein_xferdata(wiinst, (u8 *) buffer, &bytestocopy);
if ((bytestocopy >= woinst->buffer.fragment_size)
|| (bytestocopy >= count)) {
- bytestocopy = min(u32, bytestocopy, count);
+ bytestocopy = min_t(u32, bytestocopy, count);
emu10k1_waveout_xferdata(woinst, (u8 *) buffer, &bytestocopy);
u32 sizetocopy, sizetocopy_now, start;
unsigned long flags;
- sizetocopy = min(u32, buffer->size, *size);
+ sizetocopy = min_t(u32, buffer->size, *size);
*size = sizetocopy;
if (!sizetocopy)
u32 sizetocopy, sizetocopy_now, start;
unsigned long flags;
- sizetocopy = min(u32, buffer->size, *size);
+ sizetocopy = min_t(u32, buffer->size, *size);
*size = sizetocopy;
if (!sizetocopy)
}
- buffer_size = min (int, count, bluetooth->bulk_out_buffer_size);
+ buffer_size = min_t (int, count, bluetooth->bulk_out_buffer_size);
memcpy (urb->transfer_buffer, current_position, buffer_size);
/* build up our urb */
if (retval == 0)
/* ifno might usefully be passed ... */
retval = ifp->driver->ioctl (ps->dev, ctrl.ioctl_code, buf);
- /* size = min(int, size, retval)? */
+ /* size = min_t(int, size, retval)? */
}
/* cleanup and return */
}
/* len must be a multiple of 4, so commands are not split */
- len = min(int, count, oob_port->bulk_out_size );
+ len = min_t(int, count, oob_port->bulk_out_size );
if( len > 4 )
len &= ~3;
/* len must be a multiple of 4 and small enough to */
/* guarantee the write will send buffered data first, */
/* so commands are in order with data and not split */
- len = min(int, count, port->bulk_out_size-2-priv->dp_out_buf_len );
+ len = min_t(int, count, port->bulk_out_size-2-priv->dp_out_buf_len );
if( len > 4 )
len &= ~3;
spin_lock_irqsave( &priv->dp_port_lock, flags );
/* send any buffered chars from throttle time on to tty subsystem */
- len = min(int, priv->dp_in_buf_len, TTY_FLIPBUF_SIZE - tty->flip.count );
+ len = min_t(int, priv->dp_in_buf_len, TTY_FLIPBUF_SIZE - tty->flip.count );
if( len > 0 ) {
memcpy( tty->flip.char_buf_ptr, priv->dp_in_buf, len );
memcpy( tty->flip.flag_buf_ptr, priv->dp_in_flag_buf, len );
priv->dp_port_num, count, from_user, in_interrupt() );
/* copy user data (which can sleep) before getting spin lock */
- count = min(int, 64, min(int, count, port->bulk_out_size-2 ) );
+ count = min_t(int, 64, min_t(int, count, port->bulk_out_size-2 ) );
if( from_user && copy_from_user( user_buf, buf, count ) ) {
return( -EFAULT );
}
/* allow space for any buffered data and for new data, up to */
/* transfer buffer size - 2 (for command and length bytes) */
- new_len = min(int, count, port->bulk_out_size-2-priv->dp_out_buf_len );
+ new_len = min_t(int, count, port->bulk_out_size-2-priv->dp_out_buf_len );
data_len = new_len + priv->dp_out_buf_len;
if( data_len == 0 ) {
if( throttled ) {
- len = min( int, len,
+ len = min_t( int, len,
DIGI_IN_BUF_SIZE - priv->dp_in_buf_len );
if( len > 0 ) {
} else {
- len = min( int, len, TTY_FLIPBUF_SIZE - tty->flip.count );
+ len = min_t( int, len, TTY_FLIPBUF_SIZE - tty->flip.count );
if( len > 0 ) {
memcpy( tty->flip.char_buf_ptr, data, len );
}
}
- transfer_size = min (int, count, URB_TRANSFER_BUFFER_SIZE);
+ transfer_size = min_t (int, count, URB_TRANSFER_BUFFER_SIZE);
if (from_user) {
if (copy_from_user (urb->transfer_buffer, current_position, transfer_size)) {
fifo = &edge_port->txfifo;
// calculate number of bytes to put in fifo
- copySize = min (int, count, (edge_port->txCredits - fifo->count));
+ copySize = min_t (int, count, (edge_port->txCredits - fifo->count));
dbg(__FUNCTION__"(%d) of %d byte(s) Fifo room %d -- will copy %d bytes",
port->number, count, edge_port->txCredits - fifo->count, copySize);
	// then copy the rest from the start of the buffer
bytesleft = fifo->size - fifo->head;
- firsthalf = min (int, bytesleft, copySize);
+ firsthalf = min_t (int, bytesleft, copySize);
dbg (__FUNCTION__" - copy %d bytes of %d into fifo ", firsthalf, bytesleft);
/* now copy our data */
/* now copy our data */
bytesleft = fifo->size - fifo->tail;
- firsthalf = min (int, bytesleft, count);
+ firsthalf = min_t (int, bytesleft, count);
memcpy(&buffer[2], &fifo->fifo[fifo->tail], firsthalf);
fifo->tail += firsthalf;
fifo->count -= firsthalf;
// TxCredits value below which driver won't bother sending (to prevent too many small writes).
// Send only if above 25%
-#define EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(InitialCredit) (max(int, ((InitialCredit) / 4), EDGE_FW_BULK_MAX_PACKET_SIZE))
+#define EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(InitialCredit) (max_t(int, ((InitialCredit) / 4), EDGE_FW_BULK_MAX_PACKET_SIZE))
#define EDGE_FW_BULK_MAX_PACKET_SIZE 64 // Max Packet Size for Bulk In Endpoint (EP1)
#define EDGE_FW_BULK_READ_BUFFER_SIZE 1024 // Size to use for Bulk reads
/* initialize some parts of the port structures */
/* we don't use num_ports here cauz some devices have more endpoint pairs than ports */
- max_endpoints = max(int, num_bulk_in, num_bulk_out);
- max_endpoints = max(int, max_endpoints, num_interrupt_in);
- max_endpoints = max(int, max_endpoints, serial->num_ports);
+ max_endpoints = max_t(int, num_bulk_in, num_bulk_out);
+ max_endpoints = max_t(int, max_endpoints, num_interrupt_in);
+ max_endpoints = max_t(int, max_endpoints, serial->num_ports);
dbg (__FUNCTION__ " - setting up %d port structures for this device", max_endpoints);
for (i = 0; i < max_endpoints; ++i) {
port = &serial->port[i];
}
}
- transfer_size = min (int, count, URB_TRANSFER_BUFFER_SIZE);
+ transfer_size = min_t (int, count, URB_TRANSFER_BUFFER_SIZE);
if (from_user) {
if (copy_from_user (urb->transfer_buffer, current_position, transfer_size)) {
bytes_sent = -EFAULT;
do {
// loop, never allocate or transfer more than 64k at once (min(128k, 255*info->ssize) is the real limit)
- len = min(int, totallen, 65536);
+ len = min_t(int, totallen, 65536);
if (use_sg) {
sg = (struct scatterlist *) dest;
do {
// loop, never allocate or transfer more than 64k at once (min(128k, 255*info->ssize) is the real limit)
- len = min(int, totallen, 65536);
+ len = min_t(int, totallen, 65536);
if (use_sg) {
sg = (struct scatterlist *) src;
do {
// loop, never allocate or transfer more than 64k at once (min(128k, 255*info->ssize) is the real limit)
- len = min(int, totallen, 65536);
+ len = min_t(int, totallen, 65536);
if (use_sg) {
sg = (struct scatterlist *) dest;
do {
// loop, never allocate or transfer more than 64k at once (min(128k, 255*info->ssize) is the real limit)
- len = min(int, totallen, 65536);
+ len = min_t(int, totallen, 65536);
if (use_sg) {
sg = (struct scatterlist *) src;
case RH_GET_DESCRIPTOR:
switch ((wValue & 0xff00) >> 8) {
case 0x01: /* device descriptor */
- len = min(unsigned int, leni,
- min(unsigned int,
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int,
sizeof(root_hub_dev_des), wLength));
memcpy(data, root_hub_dev_des, len);
OK(len);
case 0x02: /* configuration descriptor */
- len = min(unsigned int, leni,
- min(unsigned int,
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int,
sizeof(root_hub_config_des), wLength));
memcpy (data, root_hub_config_des, len);
OK(len);
uhci->io_addr, "UHCI-alt",
data, wLength);
if (len > 0) {
- OK(min(int, leni, len));
+ OK(min_t(int, leni, len));
} else
stat = -EPIPE;
}
break;
case RH_GET_DESCRIPTOR | RH_CLASS:
root_hub_hub_des[2] = uhci->rh.numports;
- len = min(unsigned int, leni,
- min(unsigned int, sizeof(root_hub_hub_des), wLength));
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
memcpy(data, root_hub_hub_des, len);
OK(len);
case RH_GET_CONFIGURATION:
if (ret > 0) {
memcpy(rh_data, data,
- min(unsigned int, len,
- min(unsigned int, rh_len, sizeof(data))));
+ min_t(unsigned int, len,
+ min_t(unsigned int, rh_len, sizeof(data))));
return len;
}
return 0;
case RH_GET_DESCRIPTOR:
switch ((wValue & 0xff00) >> 8) {
case (0x01): /* device descriptor */
- len = min(unsigned int,
+ len = min_t(unsigned int,
leni,
- min(unsigned int,
+ min_t(unsigned int,
sizeof (root_hub_dev_des),
wLength));
data_buf = root_hub_dev_des; OK(len);
case (0x02): /* configuration descriptor */
- len = min(unsigned int,
+ len = min_t(unsigned int,
leni,
- min(unsigned int,
+ min_t(unsigned int,
sizeof (root_hub_config_des),
wLength));
data_buf = root_hub_config_des; OK(len);
data, wLength);
if (len > 0) {
data_buf = data;
- OK(min(int, leni, len));
+ OK(min_t(int, leni, len));
}
// else fallthrough
default:
data_buf [10] = data_buf [9] = 0xff;
}
- len = min(unsigned int, leni,
- min(unsigned int, data_buf [0], wLength));
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int, data_buf [0], wLength));
OK (len);
}
// ohci_dump_roothub (ohci, 0);
#endif
- len = min(int, len, leni);
+ len = min_t(int, len, leni);
if (data != data_buf)
memcpy (data, data_buf, len);
urb->actual_length = len;
case RH_GET_DESCRIPTOR:
switch ((wValue & 0xff00) >> 8) {
case (0x01): /* device descriptor */
- len = min(unsigned int, leni,
- min(unsigned int,
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int,
sizeof (root_hub_dev_des), wLength));
memcpy (data, root_hub_dev_des, len);
OK (len);
case (0x02): /* configuration descriptor */
- len = min(unsigned int, leni,
- min(unsigned int,
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int,
sizeof (root_hub_config_des), wLength));
memcpy (data, root_hub_config_des, len);
OK (len);
uhci->io_addr, "UHCI",
data, wLength);
if (len > 0) {
- OK(min(int, leni, len));
+ OK(min_t(int, leni, len));
} else
stat = -EPIPE;
}
case RH_GET_DESCRIPTOR | RH_CLASS:
root_hub_hub_des[2] = uhci->rh.numports;
- len = min(unsigned int, leni,
- min(unsigned int, sizeof (root_hub_hub_des), wLength));
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
memcpy (data, root_hub_hub_des, len);
OK (len);
goto out;
if (ia_valid & ATTR_SIZE)
- vmtruncate(inode, attr->ia_size);
+ error = vmtruncate(inode, attr->ia_size);
+
+ if (error)
+ goto out;
+
if (ia_valid & ATTR_MTIME) {
inode->i_mtime = attr->ia_mtime;
adfs_unix2adfs_time(inode, attr->ia_mtime);
return retval;
}
-void inode_setattr(struct inode * inode, struct iattr * attr)
+int inode_setattr(struct inode * inode, struct iattr * attr)
{
unsigned int ia_valid = attr->ia_valid;
+ int error = 0;
+
+ if (ia_valid & ATTR_SIZE) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ goto out;
+ }
if (ia_valid & ATTR_UID)
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_SIZE)
- vmtruncate(inode, attr->ia_size);
if (ia_valid & ATTR_ATIME)
inode->i_atime = attr->ia_atime;
if (ia_valid & ATTR_MTIME)
inode->i_mode &= ~S_ISGID;
}
mark_inode_dirty(inode);
+out:
+ return error;
}
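Filesystems converted to the new interface are expected to propagate
the return value rather than discard it. A minimal sketch of a
converted notify_change() in the style of the adfs/hpfs changes in
this patch (the function name is illustrative):
--------------------------------------------------------------
static int example_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	/* may now fail inside vmtruncate(), so check the result */
	return inode_setattr(inode, attr);
}
--------------------------------------------------------------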
static int setattr_mask(unsigned int ia_valid)
atomic_t buffermem_pages = ATOMIC_INIT(0);
/* Here is the parameter block for the bdflush process. If you add or
- * remove any of the parameters, make sure to update kernel/sysctl.c.
+ * remove any of the parameters, make sure to update kernel/sysctl.c
+ * and the documentation at linux/Documentation/sysctl/vm.txt.
*/
#define N_PARAM 9
#define INSIZE(tag) sizeof(struct coda_ ## tag ## _in)
#define OUTSIZE(tag) sizeof(struct coda_ ## tag ## _out)
-#define SIZE(tag) max(unsigned int, INSIZE(tag), OUTSIZE(tag))
+#define SIZE(tag) max_t(unsigned int, INSIZE(tag), OUTSIZE(tag))
/* the upcalls */
int offset;
offset = INSIZE(lookup);
- insize = max(unsigned int, offset + length +1, OUTSIZE(lookup));
+ insize = max_t(unsigned int, offset + length +1, OUTSIZE(lookup));
UPARG(CODA_LOOKUP);
inp->coda_lookup.VFid = *fid;
int offset;
offset = INSIZE(mkdir);
- insize = max(unsigned int, offset + length + 1, OUTSIZE(mkdir));
+ insize = max_t(unsigned int, offset + length + 1, OUTSIZE(mkdir));
UPARG(CODA_MKDIR);
inp->coda_mkdir.VFid = *dirfid;
int offset, s;
offset = INSIZE(rename);
- insize = max(unsigned int, offset + new_length + old_length + 8,
+ insize = max_t(unsigned int, offset + new_length + old_length + 8,
OUTSIZE(rename));
UPARG(CODA_RENAME);
int offset;
offset = INSIZE(create);
- insize = max(unsigned int, offset + length + 1, OUTSIZE(create));
+ insize = max_t(unsigned int, offset + length + 1, OUTSIZE(create));
UPARG(CODA_CREATE);
inp->coda_create.VFid = *dirfid;
int offset;
offset = INSIZE(rmdir);
- insize = max(unsigned int, offset + length + 1, OUTSIZE(rmdir));
+ insize = max_t(unsigned int, offset + length + 1, OUTSIZE(rmdir));
UPARG(CODA_RMDIR);
inp->coda_rmdir.VFid = *dirfid;
int error=0, insize, outsize, offset;
offset = INSIZE(remove);
- insize = max(unsigned int, offset + length + 1, OUTSIZE(remove));
+ insize = max_t(unsigned int, offset + length + 1, OUTSIZE(remove));
UPARG(CODA_REMOVE);
inp->coda_remove.VFid = *dirfid;
int retlen;
char *result;
- insize = max(unsigned int,
+ insize = max_t(unsigned int,
INSIZE(readlink), OUTSIZE(readlink)+ *length + 1);
UPARG(CODA_READLINK);
int offset;
offset = INSIZE(link);
- insize = max(unsigned int, offset + len + 1, OUTSIZE(link));
+ insize = max_t(unsigned int, offset + len + 1, OUTSIZE(link));
UPARG(CODA_LINK);
inp->coda_link.sourceFid = *fid;
int offset, s;
offset = INSIZE(symlink);
- insize = max(unsigned int, offset + len + symlen + 8, OUTSIZE(symlink));
+ insize = max_t(unsigned int, offset + len + symlen + 8, OUTSIZE(symlink));
UPARG(CODA_SYMLINK);
/* inp->coda_symlink.attr = *tva; XXXXXX */
union outputArgs *outp;
int insize, outsize, error;
- insize = max(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
+ insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
UPARG(CODA_STATFS);
error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
/*================ Forward declarations ================*/
static loff_t cap_info_llseek(struct file *, loff_t,
- int);
+ int);
static hfs_rwret_t cap_info_read(struct file *, char *,
hfs_rwarg_t, loff_t *);
static hfs_rwret_t cap_info_write(struct file *, const char *,
attr->ia_valid &= ~ATTR_SIZE;
}
}
- inode_setattr(inode, attr);
+ error = inode_setattr(inode, attr);
+ if (error)
+ return error;
/* We wouldn't want to mess with the sizes of the other fork */
attr->ia_valid &= ~ATTR_SIZE;
{
struct inode *inode = dentry->d_inode;
int error;
- if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) return -EINVAL;
+ if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
+ return -EINVAL;
if (inode->i_sb->s_hpfs_root == inode->i_ino) return -EINVAL;
if ((error = inode_change_ok(inode, attr))) return error;
- inode_setattr(inode, attr);
+ error = inode_setattr(inode, attr);
+ if (error) return error;
hpfs_write_inode(inode);
return 0;
}
if ((inode->i_sb->u.isofs_sb.s_rock_offset==-1)
&&(inode->i_sb->u.isofs_sb.s_rock==2))
{
- printk(KERN_DEBUG"scanning for RockRidge behind XA attributes\n");
result=parse_rock_ridge_inode_internal(de,inode,14);
};
return result;
offset = page->index << PAGE_CACHE_SHIFT;
if (offset < inode->i_size) {
- read_len = min(long, inode->i_size - offset, PAGE_SIZE);
+ read_len = min_t(long, inode->i_size - offset, PAGE_SIZE);
r = jffs_read_data(f, buf, offset, read_len);
if (r == read_len) {
if (read_len < PAGE_SIZE) {
goto out_isem;
}
- thiscount = min(unsigned int,
+ thiscount = min_t(unsigned int,
c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode),
count);
if (pos < f->size) {
node->removed_size = raw_inode.rsize =
- min(unsigned int, thiscount, f->size - pos);
+ min_t(unsigned int, thiscount, f->size - pos);
/* If this node is going entirely over the top of old data,
we can allow it to go into the reserved space, because
D3(printk("jffs_file_write(): new f_pos %ld.\n", (long)pos));
- thiscount = min(unsigned int,
+ thiscount = min_t(unsigned int,
c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode),
count);
}
"version: %u, node_offset: %u\n",
f->name, node->ino, node->version, node_offset));
- r = min(u32, avail, max_size);
+ r = min_t(u32, avail, max_size);
D3(printk(KERN_NOTICE "jffs_get_node_data\n"));
flash_safe_read(fmc->mtd, pos, buf, r);
int r;
if (!node->fm) {
/* This node does not refer to real data. */
- r = min(u32, size - read_data,
+ r = min_t(u32, size - read_data,
node->data_size - node_offset);
memset(&buf[read_data], 0, r);
}
else {
/* No. No need to split the node. Just remove
the end of the node. */
- int r = min(u32, n->data_offset + n->data_size
+ int r = min_t(u32, n->data_offset + n->data_size
- offset, remove_size);
n->data_size -= r;
remove_size -= r;
}
while (size) {
- __u32 s = min(int, size, PAGE_SIZE);
+ __u32 s = min_t(int, size, PAGE_SIZE);
if ((r = jffs_read_data(f, (char *)page,
offset, s)) < s) {
free_page((unsigned long)page);
printk("JFFS: Erase failed! pos = 0x%lx\n",
(long)pos);
jffs_hexdump(fmc->mtd, pos,
- min(u32, 256, end - pos));
+ min_t(u32, 256, end - pos));
err = -1;
break;
}
#include <linux/pagemap.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
+#include <linux/personality.h>
#include <asm/namei.h>
#include <asm/uaccess.h>
/* First read in as much as possible for each bufsize. */
while (already_read < count) {
int read_this_time;
- size_t to_read = min(unsigned int,
+ size_t to_read = min_t(unsigned int,
bufsize - (pos % bufsize),
count - already_read);
}
while (already_written < count) {
int written_this_time;
- size_t to_write = min(unsigned int,
+ size_t to_write = min_t(unsigned int,
bufsize - (pos % bufsize),
count - already_written);
ncp_inode_close(inode);
result = ncp_make_closed(inode);
if (!result)
- vmtruncate(inode, attr->ia_size);
+ result = vmtruncate(inode, attr->ia_size);
}
out:
return result;
}
{
struct ncp_objectname_ioctl user;
- int outl;
+ size_t outl;
if (copy_from_user(&user,
(struct ncp_objectname_ioctl*)arg,
}
{
struct ncp_privatedata_ioctl user;
- int outl;
+ size_t outl;
if (copy_from_user(&user,
(struct ncp_privatedata_ioctl*)arg,
to_read = bufsize - (pos % bufsize);
- to_read = min(unsigned int, to_read, count - already_read);
+ to_read = min_t(unsigned int, to_read, count - already_read);
if (ncp_read_kernel(NCP_SERVER(inode),
NCP_FINFO(inode)->file_handle,
ncp_unlock_server(server);
return result;
}
- *target = min(unsigned int, ntohs(ncp_reply_word(server, 0)), size);
+ *target = min_t(unsigned int, ntohs(ncp_reply_word(server, 0)), size);
ncp_unlock_server(server);
return 0;
/* NCP over UDP returns 0 (!!!) */
result = ntohs(ncp_reply_word(server, 0));
if (result >= NCP_BLOCK_SIZE)
- size = min(int, result, size);
+ size = min(result, size);
*ret_size = size;
*ret_options = ncp_reply_byte(server, 4);
memcpy(data,server->sign_root,8);
PUT_LE32(data+8,(*size));
memcpy(data+12,server->packet+sizeof(struct ncp_request_header)-1,
- min(unsigned int,(*size)-sizeof(struct ncp_request_header)+1,52));
+ min_t(unsigned int,(*size)-sizeof(struct ncp_request_header)+1,52));
nwsign(server->sign_last,data,server->sign_last);
"%s.\n", kdevname(dev));
goto out;
}
- if (romfs_checksum(rsb, min(int, sz, 512))) {
+ if (romfs_checksum(rsb, min_t(int, sz, 512))) {
printk ("romfs: bad initial checksum on dev "
"%s.\n", kdevname(dev));
goto out;
return -1; /* error */
avail = ROMBSIZE - (offset & ROMBMASK);
- maxsize = min(unsigned long, count, avail);
+ maxsize = min_t(unsigned long, count, avail);
res = strnlen(((char *)bh->b_data)+(offset&ROMBMASK), maxsize);
brelse(bh);
bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
if (!bh)
return -1;
- maxsize = min(unsigned long, count - res, ROMBSIZE);
+ maxsize = min_t(unsigned long, count - res, ROMBSIZE);
avail = strnlen(bh->b_data, maxsize);
res += avail;
brelse(bh);
return -1; /* error */
avail = ROMBSIZE - (offset & ROMBMASK);
- maxsize = min(unsigned long, count, avail);
+ maxsize = min_t(unsigned long, count, avail);
memcpy(dest, ((char *)bh->b_data) + (offset & ROMBMASK), maxsize);
brelse(bh);
bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
if (!bh)
return -1;
- maxsize = min(unsigned long, count - res, ROMBSIZE);
+ maxsize = min_t(unsigned long, count - res, ROMBSIZE);
memcpy(dest, bh->b_data, maxsize);
brelse(bh);
res += maxsize;
offset = page->index << PAGE_CACHE_SHIFT;
if (offset < inode->i_size) {
avail = inode->i_size-offset;
- readlen = min(unsigned long, avail, PAGE_SIZE);
+ readlen = min_t(unsigned long, avail, PAGE_SIZE);
if (romfs_copyfrom(inode, buf, inode->u.romfs_i.i_dataoffset+offset, readlen) == readlen) {
if (readlen < PAGE_SIZE) {
memset(buf + readlen,0,PAGE_SIZE-readlen);
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/poll.h>
+#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <asm/uaccess.h>
attr->ia_size);
if (error)
goto out;
- vmtruncate(inode, attr->ia_size);
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ goto out;
refresh = 1;
}
if (attr->ia_mode == COH_KLUDGE_SYMLINK_MODE)
attr->ia_mode = COH_KLUDGE_NOT_SYMLINK;
- inode_setattr(inode, attr);
- return 0;
+ return inode_setattr(inode, attr);
}
static struct buffer_head * sysv_update_inode(struct inode * inode)
*p = SWAB32(result);
*err = 0;
inode->i_blocks += count << uspi->s_nspfshift;
- inode->u.ufs_i.i_lastfrag = max(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
+ inode->u.ufs_i.i_lastfrag = max_t(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
NULLIFY_FRAGMENTS
}
unlock_super(sb);
if (result) {
*err = 0;
inode->i_blocks += count << uspi->s_nspfshift;
- inode->u.ufs_i.i_lastfrag = max(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
+ inode->u.ufs_i.i_lastfrag = max_t(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
NULLIFY_FRAGMENTS
unlock_super(sb);
UFSD(("EXIT, result %u\n", result))
*p = SWAB32(result);
*err = 0;
inode->i_blocks += count << uspi->s_nspfshift;
- inode->u.ufs_i.i_lastfrag = max(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
+ inode->u.ufs_i.i_lastfrag = max_t(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
NULLIFY_FRAGMENTS
unlock_super(sb);
if (newcount < request)
retry = 0;
frag1 = DIRECT_FRAGMENT;
- frag4 = min(u32, UFS_NDIR_FRAGMENT, inode->u.ufs_i.i_lastfrag);
+ frag4 = min_t(u32, UFS_NDIR_FRAGMENT, inode->u.ufs_i.i_lastfrag);
frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
frag3 = frag4 & ~uspi->s_fpbmask;
block1 = block2 = 0;
size = ubh->count << uspi->s_fshift;
bhno = 0;
while (size) {
- len = min(unsigned int, size, uspi->s_fsize);
+ len = min_t(unsigned int, size, uspi->s_fsize);
memcpy (mem, ubh->bh[bhno]->b_data, len);
mem += uspi->s_fsize;
size -= len;
size = ubh->count << uspi->s_fshift;
bhno = 0;
while (size) {
- len = min(unsigned int, size, uspi->s_fsize);
+ len = min_t(unsigned int, size, uspi->s_fsize);
memcpy (ubh->bh[bhno]->b_data, mem, len);
mem += uspi->s_fsize;
size -= len;
base = offset >> uspi->s_bpfshift;
offset &= uspi->s_bpfmask;
for (;;) {
- count = min(unsigned int, size + offset, uspi->s_bpf);
+ count = min_t(unsigned int, size + offset, uspi->s_bpf);
size -= count - offset;
pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
if (pos < count || !size)
base = start >> uspi->s_bpfshift;
start &= uspi->s_bpfmask;
for (;;) {
- count = min(unsigned int,
+ count = min_t(unsigned int,
size + (uspi->s_bpf - start), uspi->s_bpf)
- (uspi->s_bpf - start);
size -= count;
ret = umsdos_notify_change_locked(dentry, attr);
up(&dir->i_sem);
if (ret == 0)
- inode_setattr (inode, attr);
+ ret = inode_setattr (inode, attr);
out:
if (old_dentry)
dput (dentry); /* if we had to use fake dentry for hardlinks, dput() it now */
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#include <linux/config.h>
+
/*
* This file contains the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl and the "string versions" of the same
* Temporary debugging check to catch old code using
* unmapped ISA addresses. Will be removed in 2.4.
*/
-#if 0
+#if CONFIG_DEBUG_IOVIRT
extern void *__io_virt_debug(unsigned long x, const char *file, int line);
extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
#define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
__asm__ __volatile__("rep;nop");
}
+/* Prefetch instructions for Pentium III and AMD Athlon */
+#ifdef CONFIG_MPENTIUMIII
+
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
+}
+
+#elif CONFIG_X86_USE_3DNOW
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+}
+
+extern inline void prefetchw(const void *x)
+{
+ __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+}
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#endif
+
#endif /* __ASM_I386_PROCESSOR_H */
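A minimal sketch of the intended use, assuming prefetch() falls back
to a no-op on configurations that do not define ARCH_HAS_PREFETCH;
process_item() and struct item are hypothetical:
--------------------------------------------------------------
#include <linux/list.h>

struct item {
	struct list_head link;
	/* ... payload ... */
};

extern void process_item(struct item *);	/* hypothetical */

static void walk_items(struct list_head *head)
{
	struct list_head *p;

	for (p = head->next; p != head; p = p->next) {
		/* start pulling the next node in while this one is used */
		prefetch(p->next);
		process_item(list_entry(p, struct item, link));
	}
}
--------------------------------------------------------------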
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
+#include <linux/config.h>
extern int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
* initialize their spinlocks properly, tsk tsk.
* Remember to turn this off in 2.4. -ben
*/
+#if defined(CONFIG_DEBUG_SPINLOCK)
+#define SPINLOCK_DEBUG 1
+#else
#define SPINLOCK_DEBUG 0
+#endif
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
+extern void __put_user_8(void);
extern void __put_user_bad(void);
__pu_err; \
})
+#define __put_user_u64(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: movl %%eax,0(%2)\n" \
+ "2: movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,4b\n" \
+ " .long 2b,4b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
+
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
+ case 8: __put_user_u64(x,ptr,retval); break; \
default: __put_user_bad(); \
} \
} while (0)
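With the new case 8, put_user() can store a 64-bit quantity in one
call; the "A" constraint places the value in the edx:eax register
pair and both movl instructions carry fixup entries. A hedged sketch
of a call site (the names are illustrative):
--------------------------------------------------------------
static int report_size(u64 bytes, u64 *arg)
{
	/* size 8 expands to __put_user_u64() on i386 */
	if (put_user(bytes, arg))
		return -EFAULT;
	return 0;
}
--------------------------------------------------------------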
#define AC97_PCM_MIC_ADC_RATE 0x0034 /* PCM MIC ADC Rate */
#define AC97_CENTER_LFE_MASTER 0x0036 /* Center + LFE Master Volume */
#define AC97_SURROUND_MASTER 0x0038 /* Surround (Rear) Master Volume */
-#define AC97_RESERVED_3A 0x003A /* Reserved */
+#define AC97_RESERVED_3A 0x003A /* Reserved in AC '97 < 2.2 */
+
+/* AC'97 2.2 */
+#define AC97_SPDIF_CONTROL 0x003A /* S/PDIF Control */
/* range 0x3c-0x58 - MODEM */
#define AC97_EXTENDED_MODEM_ID 0x003C
#define AC97_GP_ST 0x4000 /* Stereo Enhancement 1=on */
#define AC97_GP_POP 0x8000 /* Pcm Out Path, 0=pre 3D, 1=post 3D */
+/* extended audio status and control bit defines */
+#define AC97_EA_VRA 0x0001 /* Variable bit rate enable bit */
+#define AC97_EA_DRA 0x0002 /* Double-rate audio enable bit */
+#define AC97_EA_SPDIF 0x0004 /* S/PDIF Enable bit */
+#define AC97_EA_VRM 0x0008 /* Variable bit rate for MIC enable bit */
+#define AC97_EA_CDAC 0x0040 /* PCM Center DAC is ready (Read only) */
+#define AC97_EA_SDAC 0x0080 /* PCM Surround DACs are ready (Read only) */
+#define AC97_EA_LDAC 0x0100 /* PCM LFE DAC is ready (Read only) */
+#define AC97_EA_MDAC 0x0200 /* MIC ADC is ready (Read only) */
+#define AC97_EA_SPCV 0x0400 /* S/PDIF configuration valid (Read only) */
+#define AC97_EA_PRI 0x0800 /* Turns the PCM Center DAC off */
+#define AC97_EA_PRJ 0x1000 /* Turns the PCM Surround DACs off */
+#define AC97_EA_PRK 0x2000 /* Turns the PCM LFE DAC off */
+#define AC97_EA_PRL 0x4000 /* Turns the MIC ADC off */
+#define AC97_EA_SLOT_MASK 0xffcf /* Mask for slot assignment bits */
+#define AC97_EA_SPSA_3_4 0x0000 /* Slot assigned to 3 & 4 */
+#define AC97_EA_SPSA_7_8 0x0010 /* Slot assigned to 7 & 8 */
+#define AC97_EA_SPSA_6_9 0x0020 /* Slot assigned to 6 & 9 */
+#define AC97_EA_SPSA_10_11 0x0030 /* Slot assigned to 10 & 11 */
+
+/* S/PDIF control bit defines */
+#define AC97_SC_PRO 0x0001 /* Professional status */
+#define AC97_SC_NAUDIO 0x0002 /* Non audio stream */
+#define AC97_SC_COPY 0x0004 /* Copyright status */
+#define AC97_SC_PRE 0x0008 /* Preemphasis status */
+#define AC97_SC_CC_MASK 0x07f0 /* Category Code mask */
+#define AC97_SC_L 0x0800 /* Generation Level status */
+#define AC97_SC_SPSR_MASK 0xcfff /* S/PDIF Sample Rate bits */
+#define AC97_SC_SPSR_44K 0x0000 /* Use 44.1kHz Sample rate */
+#define AC97_SC_SPSR_48K 0x2000 /* Use 48kHz Sample rate */
+#define AC97_SC_SPSR_32K 0x3000 /* Use 32kHz Sample rate */
+#define AC97_SC_DRS 0x4000 /* Double Rate S/PDIF */
+#define AC97_SC_V 0x8000 /* Validity status */
+
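A hedged sketch of how a codec driver might combine the new bits to
route PCM to S/PDIF at 48kHz; the codec_read/codec_write members
follow the usual struct ac97_codec accessors, and AC97_EXTENDED_STATUS
is assumed to name the extended audio status/control register:
--------------------------------------------------------------
static void demo_enable_spdif_48k(struct ac97_codec *codec)
{
	u16 val;

	/* pick the 48kHz rate code; the mask clears bits 13:12 */
	val = codec->codec_read(codec, AC97_SPDIF_CONTROL);
	val = (val & AC97_SC_SPSR_MASK) | AC97_SC_SPSR_48K;
	codec->codec_write(codec, AC97_SPDIF_CONTROL, val);

	/* then raise the transmitter enable bit */
	val = codec->codec_read(codec, AC97_EXTENDED_STATUS);
	codec->codec_write(codec, AC97_EXTENDED_STATUS, val | AC97_EA_SPDIF);
}
--------------------------------------------------------------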
/* powerdown control and status bit defines */
/* status */
#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
#define BLKRRPART _IO(0x12,95) /* re-read partition table */
-#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */
#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
/* A jump here: 108-111 have been used for various private purposes. */
#define BLKBSZGET _IOR(0x12,112,sizeof(int))
#define BLKBSZSET _IOW(0x12,113,sizeof(int))
+#define BLKGETSIZE64 _IOR(0x12,114,sizeof(u64)) /* return device size in bytes (u64 *arg) */
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
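Seen from userspace the two size ioctls complement each other; a
hedged sketch (error handling trimmed) that prefers the 64-bit
variant and falls back to the sector count, which a 32-bit long
overflows for devices beyond 2 TB:
--------------------------------------------------------------
#include <sys/ioctl.h>
#include <linux/fs.h>

static unsigned long long device_bytes(int fd)
{
	unsigned long long bytes;
	unsigned long sectors;

	if (ioctl(fd, BLKGETSIZE64, &bytes) == 0)
		return bytes;				/* size in bytes */
	if (ioctl(fd, BLKGETSIZE, &sectors) == 0)
		return (unsigned long long)sectors << 9;
	return 0;
}
--------------------------------------------------------------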
extern int generic_osync_inode(struct inode *, int);
extern int inode_change_ok(struct inode *, struct iattr *);
-extern void inode_setattr(struct inode *, struct iattr *);
+extern int inode_setattr(struct inode *, struct iattr *);
/*
* Common dentry functions for inclusion in the VFS
((unsigned char *)&addr)[1], \
((unsigned char *)&addr)[0]
-#define min(type,x,y) \
+/*
+ * min()/max() macros that also do
+ * strict type-checking.. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+/*
+ * ..and if you can't take the strict
+ * types, you can specify one yourself.
+ *
+ * Or not use min/max at all, of course.
+ */
+#define min_t(type,x,y) \
({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
-#define max(type,x,y) \
+#define max_t(type,x,y) \
({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
#endif /* __KERNEL__ */
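A minimal sketch of the effect: identical types pass the
pointer-comparison check silently, while mixed types draw a
"comparison of distinct pointer types" warning and have to be
resolved by naming a type with min_t()/max_t(), which puts the
conversion back in the caller's hands:
--------------------------------------------------------------
static size_t safe_len(size_t room, int count)
{
	size_t len;

	/* identical types: the strict min() compiles silently */
	len = min(room, (size_t)PAGE_SIZE);

	/* min(len, count) would warn; min_t() names the common type,
	 * but the caller must ensure count >= 0, or the negative
	 * value wraps to a huge size_t */
	len = min_t(size_t, len, count);

	return len;
}
--------------------------------------------------------------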
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
-extern void vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate(struct inode * inode, loff_t offset);
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
-#ifndef _PERSONALITY_H
-#define _PERSONALITY_H
+#ifndef _LINUX_PERSONALITY_H
+#define _LINUX_PERSONALITY_H
-#include <linux/linkage.h>
-#include <linux/ptrace.h>
-#include <asm/current.h>
+/*
+ * Handling of different ABIs (personalities).
+ */
+
+struct exec_domain;
+struct pt_regs;
+
+extern int register_exec_domain(struct exec_domain *);
+extern int unregister_exec_domain(struct exec_domain *);
+extern int __set_personality(unsigned long);
+
+
+/*
+ * Sysctl variables related to binary emulation.
+ */
+extern unsigned long abi_defhandler_coff;
+extern unsigned long abi_defhandler_elf;
+extern unsigned long abi_defhandler_lcall7;
+extern unsigned long abi_defhandler_libcso;
+extern int abi_fake_utsname;
-/* Flags for bug emulation. These occupy the top three bytes. */
-#define STICKY_TIMEOUTS 0x4000000
-#define WHOLE_SECONDS 0x2000000
-#define ADDR_LIMIT_32BIT 0x0800000
-/* Personality types. These go in the low byte. Avoid using the top bit,
- * it will conflict with error returns.
+/*
+ * Flags for bug emulation.
+ *
+ * These occupy the top three bytes.
*/
-#define PER_MASK (0x00ff)
-#define PER_LINUX (0x0000)
-#define PER_LINUX_32BIT (0x0000 | ADDR_LIMIT_32BIT)
-#define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
-#define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
-#define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
-#define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
-#define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
-#define PER_BSD (0x0006)
-#define PER_SUNOS (PER_BSD | STICKY_TIMEOUTS)
-#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
-#define PER_LINUX32 (0x0008)
-#define PER_IRIX32 (0x0009 | STICKY_TIMEOUTS) /* IRIX5 32-bit */
-#define PER_IRIXN32 (0x000a | STICKY_TIMEOUTS) /* IRIX6 new 32-bit */
-#define PER_IRIX64 (0x000b | STICKY_TIMEOUTS) /* IRIX6 64-bit */
-#define PER_RISCOS (0x000c)
-#define PER_SOLARIS (0x000d | STICKY_TIMEOUTS)
-
-/* Prototype for an lcall7 syscall handler. */
-typedef void (*lcall7_func)(int, struct pt_regs *);
-
-
-/* Description of an execution domain - personality range supported,
- * lcall7 syscall handler, start up / shut down functions etc.
- * N.B. The name and lcall7 handler must be where they are since the
- * offset of the handler is hard coded in kernel/sys_call.S.
+enum {
+ MMAP_PAGE_ZERO = 0x0100000,
+ ADDR_LIMIT_32BIT = 0x0800000,
+ SHORT_INODE = 0x1000000,
+ WHOLE_SECONDS = 0x2000000,
+ STICKY_TIMEOUTS = 0x4000000,
+};
+
+/*
+ * Personality types.
+ *
+ * These go in the low byte. Avoid using the top bit, it will
+ * conflict with error returns.
+ */
+enum {
+ PER_LINUX = 0x0000,
+ PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
+ PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
+ PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
+ PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
+ WHOLE_SECONDS | SHORT_INODE,
+ PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
+ PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
+ PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
+ PER_BSD = 0x0006,
+ PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
+ PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
+ PER_LINUX32 = 0x0008,
+ PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
+ PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
+ PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
+ PER_RISCOS = 0x000c,
+ PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
+ PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
+ PER_MASK = 0x00ff,
+};
+
+
+/*
+ * Description of an execution domain.
+ *
+ * The first two members are referenced from assembly source
+ * and should stay where they are unless explicitly needed.
*/
+typedef void (*handler_t)(int, struct pt_regs *);
+
struct exec_domain {
- const char *name;
- lcall7_func handler;
- unsigned char pers_low, pers_high;
- unsigned long * signal_map;
- unsigned long * signal_invmap;
- struct module * module;
- struct exec_domain *next;
+ const char *name; /* name of the execdomain */
+ handler_t handler; /* handler for syscalls */
+ unsigned char pers_low; /* lowest personality */
+ unsigned char pers_high; /* highest personality */
+ unsigned long *signal_map; /* signal mapping */
+ unsigned long *signal_invmap; /* reverse signal mapping */
+ struct map_segment *err_map; /* error mapping */
+ struct map_segment *socktype_map; /* socket type mapping */
+ struct map_segment *sockopt_map; /* socket option mapping */
+ struct map_segment *af_map; /* address family mapping */
+ struct module *module; /* module context of the ed. */
+ struct exec_domain *next; /* linked list (internal) */
};
-extern struct exec_domain default_exec_domain;
-
-extern int register_exec_domain(struct exec_domain *it);
-extern int unregister_exec_domain(struct exec_domain *it);
-#define put_exec_domain(it) \
- if (it && it->module) __MOD_DEC_USE_COUNT(it->module);
-#define get_exec_domain(it) \
- if (it && it->module) __MOD_INC_USE_COUNT(it->module);
-extern void __set_personality(unsigned long personality);
-#define set_personality(pers) do { \
- if (current->personality != pers) \
- __set_personality(pers); \
+/*
+ * Return the base personality without flags.
+ */
+#define personality(pers) (pers & PER_MASK)
+
+/*
+ * Personality of the currently running process.
+ */
+#define get_personality (current->personality)
+
+/*
+ * Change personality of the currently running process.
+ */
+#define set_personality(pers) \
+ ((current->personality == pers) ? 0 : __set_personality(pers))
+
+/*
+ * Load an execution domain.
+ */
+#define get_exec_domain(ep) \
+do { \
+ if (ep != NULL && ep->module != NULL) \
+ __MOD_INC_USE_COUNT(ep->module); \
+} while (0)
+
+/*
+ * Unload an execution domain.
+ */
+#define put_exec_domain(ep) \
+do { \
+ if (ep != NULL && ep->module != NULL) \
+ __MOD_DEC_USE_COUNT(ep->module); \
} while (0)
-asmlinkage long sys_personality(unsigned long personality);
-#endif /* _PERSONALITY_H */
+#endif /* _LINUX_PERSONALITY_H */
#include <linux/config.h>
#include <linux/binfmts.h>
-#include <linux/personality.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
+struct exec_domain;
+
/*
* cloning flags:
*/
#define MAX_COUNTER (20*HZ/100)
#define DEF_NICE (0)
+
+/*
+ * The default (Linux) execution domain.
+ */
+extern struct exec_domain default_exec_domain;
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
* 64 (1 << INACTIVE_SHIFT) seconds.
*/
#define INACTIVE_SHIFT 6
-#define inactive_target min(unsigned long, \
+#define inactive_target min_t(unsigned long, \
(memory_pressure >> INACTIVE_SHIFT), \
(num_physpages / 4))
CTL_FS=5, /* Filesystems */
CTL_DEBUG=6, /* Debugging */
CTL_DEV=7, /* Devices */
- CTL_BUS=8 /* Buses */
+ CTL_BUS=8, /* Buses */
+ CTL_ABI=9 /* Binary emulation */
};
/* CTL_BUS names: */
DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
};
+/* /proc/sys/abi */
+enum
+{
+ ABI_DEFHANDLER_COFF=1, /* default handler for coff binaries */
+ ABI_DEFHANDLER_ELF=2, /* default handler for ELF binaries */
+ ABI_DEFHANDLER_LCALL7=3,/* default handler for procs using lcall7 */
+ ABI_DEFHANDLER_LIBCSO=4,/* default handler for a libc.so ELF interp */
+ ABI_TRACE=5, /* tracing flags */
+ ABI_FAKE_UTSNAME=6, /* fake target utsname information */
+};
+
#ifdef __KERNEL__
extern asmlinkage long sys_sysctl(struct __sysctl_args *);
static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
- return (waitall ? len : min(int, sk->rcvlowat, len)) ? : 1;
+ return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
}
/* Alas, with timeout socket operations are not restartable.
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- unsigned int hint = min(unsigned int, tp->advmss, tp->mss_cache);
+ unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
- hint = min(unsigned int, hint, tp->rcv_wnd/2);
+ hint = min_t(unsigned int, hint, tp->rcv_wnd/2);
- tp->ack.rcv_mss = max(unsigned int,
- min(unsigned int,
+ tp->ack.rcv_mss = max_t(unsigned int,
+ min_t(unsigned int,
hint, TCP_MIN_RCVMSS),
TCP_MIN_MSS);
}
*/
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
- return max(u32, tp->snd_cwnd >> 1, 2);
+ return max_t(u32, tp->snd_cwnd >> 1, 2);
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
return tp->snd_ssthresh;
else
- return max(u32, tp->snd_ssthresh,
+ return max_t(u32, tp->snd_ssthresh,
((tp->snd_cwnd >> 1) +
(tp->snd_cwnd >> 2)));
}
{
tp->undo_marker = 0;
tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
- tp->snd_cwnd = min(u32, tp->snd_cwnd,
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd,
tcp_packets_in_flight(tp) + 1);
tp->snd_cwnd_cnt = 0;
tp->high_seq = tp->snd_nxt;
/* If no clamp set the clamp to the max possible scaled window */
if (*window_clamp == 0)
(*window_clamp) = (65535 << 14);
- space = min(u32, *window_clamp, space);
+ space = min_t(u32, *window_clamp, space);
/* Quantize space offering to a multiple of mss if possible. */
if (space > mss)
* our initial window offering to 32k. There should also
* be a sysctl option to stop being nice.
*/
- (*rcv_wnd) = min(int, space, MAX_TCP_WINDOW);
+ (*rcv_wnd) = min_t(int, space, MAX_TCP_WINDOW);
(*rcv_wscale) = 0;
if (wscale_ok) {
/* See RFC1323 for an explanation of the limit to 14 */
(*rcv_wscale)++;
}
if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
- space - max(unsigned int, (space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
+ space - max_t(unsigned int, (space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
(*rcv_wscale)--;
}
*rcv_wnd = init_cwnd*mss;
}
/* Set the clamp no higher than max representable value */
- (*window_clamp) = min(u32, 65535 << (*rcv_wscale), *window_clamp);
+ (*window_clamp) = min_t(u32, 65535 << (*rcv_wscale), *window_clamp);
}
static inline int tcp_win_from_space(int space)
static inline void tcp_moderate_sndbuf(struct sock *sk)
{
if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
- sk->sndbuf = min(int, sk->sndbuf, sk->wmem_queued/2);
- sk->sndbuf = max(int, sk->sndbuf, SOCK_MIN_SNDBUF);
+ sk->sndbuf = min_t(int, sk->sndbuf, sk->wmem_queued/2);
+ sk->sndbuf = max_t(int, sk->sndbuf, SOCK_MIN_SNDBUF);
}
}
O_TARGET := kernel.o
-export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o
+export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o
obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o \
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
-#include <linux/mm.h>
-#include <linux/smp_lock.h>
+/*
+ * Handling of different ABIs (personalities).
+ *
+ * We group personalities into execution domains which have their
+ * own handlers for kernel entry points, signal mapping, etc...
+ *
+ * 2001-05-06 Complete rewrite, Christoph Hellwig (hch@caldera.de)
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+
-static asmlinkage void no_lcall7(int segment, struct pt_regs * regs);
+static void default_handler(int, struct pt_regs *);
+
+static struct exec_domain *exec_domains = &default_exec_domain;
+static rwlock_t exec_domains_lock = RW_LOCK_UNLOCKED;
-static unsigned long ident_map[32] = {
+static u_long ident_map[32] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
};
struct exec_domain default_exec_domain = {
- "Linux", /* name */
- no_lcall7, /* lcall7 causes a seg fault. */
- 0, 0xff, /* All personalities. */
- ident_map, /* Identity map signals. */
- ident_map, /* - both ways. */
- NULL, /* No usage counter. */
- NULL /* Nothing after this in the list. */
+ "Linux", /* name */
+ default_handler, /* lcall7 causes a seg fault. */
+ 0, 0, /* PER_LINUX personality. */
+ ident_map, /* Identity map signals. */
+ ident_map, /* - both ways. */
};
-static struct exec_domain *exec_domains = &default_exec_domain;
-static rwlock_t exec_domains_lock = RW_LOCK_UNLOCKED;
-static asmlinkage void no_lcall7(int segment, struct pt_regs * regs)
+static void
+default_handler(int segment, struct pt_regs *regp)
{
- /*
- * This may have been a static linked SVr4 binary, so we would have the
- * personality set incorrectly. Check to see whether SVr4 is available,
- * and use it, otherwise give the user a SEGV.
- */
- set_personality(PER_SVR4);
-
- if (current->exec_domain && current->exec_domain->handler
- && current->exec_domain->handler != no_lcall7) {
- current->exec_domain->handler(segment, regs);
- return;
+ u_long pers = 0;
+
+ /*
+ * This may have been a static linked SVr4 binary, so we would
+ * have the personality set incorrectly. Or it might have been
+ * a Solaris/x86 binary. We can tell which because the former
+ * uses lcall7, while the latter used lcall 0x27.
+ * Try to find or load the appropriate personality, and fall back
+ * to just forcing a SEGV.
+ *
+ * XXX: this is IA32-specific and should be moved to the MD-tree.
+ */
+ switch (segment) {
+#ifdef __i386__
+ case 0x07:
+ pers = abi_defhandler_lcall7;
+ break;
+ case 0x27:
+ pers = PER_SOLARIS;
+ break;
+#endif
}
+ set_personality(pers);
- send_sig(SIGSEGV, current, 1);
+ if (current->exec_domain->handler != default_handler)
+ current->exec_domain->handler(segment, regp);
+ else
+ send_sig(SIGSEGV, current, 1);
}
-static struct exec_domain *lookup_exec_domain(unsigned long personality)
+static struct exec_domain *
+lookup_exec_domain(u_long personality)
{
- unsigned long pers = personality & PER_MASK;
- struct exec_domain *it;
+ struct exec_domain * ep;
+ char buffer[30];
+ u_long pers = personality(personality);
+
+ read_lock(&exec_domains_lock);
+ for (ep = exec_domains; ep; ep = ep->next) {
+ if (pers >= ep->pers_low && pers <= ep->pers_high)
+ if (try_inc_mod_count(ep->module))
+ goto out;
+ }
+
+#ifdef CONFIG_KMOD
+ read_unlock(&exec_domains_lock);
+ sprintf(buffer, "personality-%ld", pers);
+ request_module(buffer);
read_lock(&exec_domains_lock);
- for (it=exec_domains; it; it=it->next)
- if (pers >= it->pers_low && pers <= it->pers_high) {
- if (!try_inc_mod_count(it->module))
- continue;
- read_unlock(&exec_domains_lock);
- return it;
- }
+ for (ep = exec_domains; ep; ep = ep->next) {
+ if (pers >= ep->pers_low && pers <= ep->pers_high)
+ if (try_inc_mod_count(ep->module))
+ goto out;
+ }
- read_unlock(&exec_domains_lock);
+#endif
- /* Should never get this far. */
- printk(KERN_ERR "No execution domain for personality 0x%02lx\n", pers);
- return NULL;
+ ep = &default_exec_domain;
+out:
+ read_unlock(&exec_domains_lock);
+ return (ep);
}
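The request_module() call above only finds something to load if an
emulation module is actually reachable under that alias, e.g. through
a modules.conf line of the form "alias personality-13 solaris" (the
module name here is illustrative; 13 is the base personality of
PER_SOLARIS).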
-int register_exec_domain(struct exec_domain *it)
+int
+register_exec_domain(struct exec_domain *ep)
{
- struct exec_domain *tmp;
+ struct exec_domain *tmp;
+ int err = -EBUSY;
- if (!it)
+ if (ep == NULL)
return -EINVAL;
- if (it->next)
+
+ if (ep->next != NULL)
return -EBUSY;
+
write_lock(&exec_domains_lock);
- for (tmp=exec_domains; tmp; tmp=tmp->next)
- if (tmp == it) {
- write_unlock(&exec_domains_lock);
- return -EBUSY;
- }
- it->next = exec_domains;
- exec_domains = it;
+ for (tmp = exec_domains; tmp; tmp = tmp->next) {
+ if (tmp == ep)
+ goto out;
+ }
+
+ ep->next = exec_domains;
+ exec_domains = ep;
+ err = 0;
+
+out:
write_unlock(&exec_domains_lock);
- return 0;
+ return (err);
}
-int unregister_exec_domain(struct exec_domain *it)
+int
+unregister_exec_domain(struct exec_domain *ep)
{
- struct exec_domain ** tmp;
+ struct exec_domain **epp;
- tmp = &exec_domains;
+ epp = &exec_domains;
write_lock(&exec_domains_lock);
- while (*tmp) {
- if (it == *tmp) {
- *tmp = it->next;
- it->next = NULL;
- write_unlock(&exec_domains_lock);
- return 0;
- }
- tmp = &(*tmp)->next;
+ for (epp = &exec_domains; *epp; epp = &(*epp)->next) {
+ if (ep == *epp)
+ goto unregister;
}
write_unlock(&exec_domains_lock);
return -EINVAL;
+
+unregister:
+ *epp = ep->next;
+ ep->next = NULL;
+ write_unlock(&exec_domains_lock);
+ return 0;
}
-void __set_personality(unsigned long personality)
+int
+__set_personality(u_long personality)
{
- struct exec_domain *it, *prev;
+ struct exec_domain *ep, *oep;
- it = lookup_exec_domain(personality);
- if (it == current->exec_domain) {
+ ep = lookup_exec_domain(personality);
+ if (ep == NULL)
+ return -EINVAL;
+ if (ep == current->exec_domain) {
current->personality = personality;
- return;
+ return 0;
}
- if (!it)
- return;
+
	if (atomic_read(&current->fs->count) != 1) {
- struct fs_struct *new = copy_fs_struct(current->fs);
- struct fs_struct *old;
- if (!new) {
- put_exec_domain(it);
- return;
+ struct fs_struct *fsp, *ofsp;
+
+ fsp = copy_fs_struct(current->fs);
+ if (fsp == NULL) {
+ put_exec_domain(ep);
+ return -ENOMEM;
}
+
task_lock(current);
- old = current->fs;
- current->fs = new;
+ ofsp = current->fs;
+ current->fs = fsp;
task_unlock(current);
- put_fs_struct(old);
+
+ put_fs_struct(ofsp);
}
+
/*
* At that point we are guaranteed to be the sole owner of
* current->fs.
*/
+
current->personality = personality;
- prev = current->exec_domain;
- current->exec_domain = it;
+ oep = current->exec_domain;
+ current->exec_domain = ep;
set_fs_altroot();
- put_exec_domain(prev);
-}
-asmlinkage long sys_personality(unsigned long personality)
-{
- int ret = current->personality;
- if (personality != 0xffffffff) {
- set_personality(personality);
- if (current->personality != personality)
- ret = -EINVAL;
- }
- return ret;
+ put_exec_domain(oep);
+
+ printk(KERN_DEBUG "[%s:%d]: set personality to %lx\n",
+ current->comm, current->pid, personality);
+ return 0;
}
-int get_exec_domain_list(char * page)
+int
+get_exec_domain_list(char *page)
{
- int len = 0;
- struct exec_domain * e;
+ struct exec_domain *ep;
+ int len = 0;
read_lock(&exec_domains_lock);
- for (e=exec_domains; e && len < PAGE_SIZE - 80; e=e->next)
- len += sprintf(page+len, "%d-%d\t%-16s\t[%s]\n",
- e->pers_low, e->pers_high, e->name,
- e->module ? e->module->name : "kernel");
+ for (ep = exec_domains; ep && len < PAGE_SIZE - 80; ep = ep->next)
+ len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n",
+ ep->pers_low, ep->pers_high, ep->name,
+ ep->module ? ep->module->name : "kernel");
read_unlock(&exec_domains_lock);
- return len;
+ return (len);
}
+
+asmlinkage long
+sys_personality(u_long personality)
+{
+ if (personality == 0xffffffff)
+ goto ret;
+ set_personality(personality);
+ if (current->personality != personality)
+ return -EINVAL;
+ret:
+ return (current->personality);
+}
+
+
+EXPORT_SYMBOL(register_exec_domain);
+EXPORT_SYMBOL(unregister_exec_domain);
+EXPORT_SYMBOL(__set_personality);
+
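For an emulation module the registration interface keeps its shape; a
minimal sketch of a module plugging one personality range into the
list (all demo_* names, the handler body and the signal maps are
placeholders):
--------------------------------------------------------------
#include <linux/init.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/sched.h>

static void demo_handler(int segment, struct pt_regs *regp)
{
	/* placeholder: a real domain dispatches the foreign syscall */
	send_sig(SIGSEGV, current, 1);
}

/* placeholder maps; a real domain supplies proper signal mappings */
static unsigned long demo_signal_map[32];

static struct exec_domain demo_exec_domain = {
	name:		"demo",
	handler:	demo_handler,
	pers_low:	PER_SVR4 & PER_MASK,
	pers_high:	PER_SVR4 & PER_MASK,
	signal_map:	demo_signal_map,
	signal_invmap:	demo_signal_map,
	module:		THIS_MODULE,
};

static int __init demo_init(void)
{
	return register_exec_domain(&demo_exec_domain);
}

static void __exit demo_exit(void)
{
	unregister_exec_domain(&demo_exec_domain);
}

module_init(demo_init);
module_exit(demo_exit);
--------------------------------------------------------------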
+/*
+ * We have to have all sysctl handling for the Linux-ABI
+ * in one place as the dynamic registration of sysctls is
+ * horribly crufty in Linux <= 2.4.
+ *
+ * I hope the new sysctl schemes discussed for future versions
+ * will obsolete this.
+ *
+ * --hch
+ */
+
+u_long abi_defhandler_coff = PER_SCOSVR3;
+u_long abi_defhandler_elf = PER_LINUX;
+u_long abi_defhandler_lcall7 = PER_SVR4;
+u_long abi_defhandler_libcso = PER_SVR4;
+u_int abi_traceflg;
+int abi_fake_utsname;
+
+static struct ctl_table abi_table[] = {
+ {ABI_DEFHANDLER_COFF, "defhandler_coff", &abi_defhandler_coff,
+ sizeof(u_long), 0644, NULL, &proc_doulongvec_minmax},
+ {ABI_DEFHANDLER_ELF, "defhandler_elf", &abi_defhandler_elf,
+ sizeof(u_long), 0644, NULL, &proc_doulongvec_minmax},
+ {ABI_DEFHANDLER_LCALL7, "defhandler_lcall7", &abi_defhandler_lcall7,
+ sizeof(u_long), 0644, NULL, &proc_doulongvec_minmax},
+ {ABI_DEFHANDLER_LIBCSO, "defhandler_libcso", &abi_defhandler_libcso,
+ sizeof(u_long), 0644, NULL, &proc_doulongvec_minmax},
+ {ABI_TRACE, "trace", &abi_traceflg,
+ sizeof(u_int), 0644, NULL, &proc_dointvec},
+ {ABI_FAKE_UTSNAME, "fake_utsname", &abi_fake_utsname,
+ sizeof(int), 0644, NULL, &proc_dointvec},
+ {0}
+};
+
+static struct ctl_table abi_root_table[] = {
+ {CTL_ABI, "abi", NULL, 0, 0555, abi_table},
+ {0}
+};
+
+static int __init
+abi_register_sysctl(void)
+{
+ register_sysctl_table(abi_root_table, 1);
+ return 0;
+}
+
+__initcall(abi_register_sysctl);
+
+
+EXPORT_SYMBOL(abi_defhandler_coff);
+EXPORT_SYMBOL(abi_defhandler_elf);
+EXPORT_SYMBOL(abi_defhandler_lcall7);
+EXPORT_SYMBOL(abi_defhandler_libcso);
+EXPORT_SYMBOL(abi_traceflg);
+EXPORT_SYMBOL(abi_fake_utsname);
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/personality.h>
#include <linux/tty.h>
#ifdef CONFIG_BSD_PROCESS_ACCT
#include <linux/acct.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
+#include <linux/personality.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
EXPORT_SYMBOL(remove_arg_zero);
EXPORT_SYMBOL(set_binfmt);
-/* execution environment registration */
-EXPORT_SYMBOL(register_exec_domain);
-EXPORT_SYMBOL(unregister_exec_domain);
-EXPORT_SYMBOL(__set_personality);
-
/* sysctl table registration */
EXPORT_SYMBOL(register_sysctl_table);
EXPORT_SYMBOL(unregister_sysctl_table);
* between the file and the memory map for a potential last
* incomplete page. Ugly, but necessary.
*/
-void vmtruncate(struct inode * inode, loff_t offset)
+int vmtruncate(struct inode * inode, loff_t offset)
{
unsigned long pgoff;
struct address_space *mapping = inode->i_mapping;
unlock_kernel();
}
out:
- return;
+ return 0;
}
/*
* FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
*/
+#ifdef CONFIG_DEBUG_SLAB
+#define DEBUG 1
+#define STATS 1
+#define FORCED_DEBUG 1
+#else
#define DEBUG 0
#define STATS 0
#define FORCED_DEBUG 0
+#endif
/*
* Parameters for kmem_cache_reap
return cachep;
}
+
+#if DEBUG
+/*
+ * This checks whether the kmem_cache_t pointer is chained in the cache_chain
+ * list. -arca
+ */
+static int is_chained_kmem_cache(kmem_cache_t * cachep)
+{
+ struct list_head *p;
+ int ret = 0;
+
+ /* Find the cache in the chain of caches. */
+ down(&cache_chain_sem);
+ list_for_each(p, &cache_chain) {
+ if (p == &cachep->next) {
+ ret = 1;
+ break;
+ }
+ }
+ up(&cache_chain_sem);
+
+ return ret;
+}
+#else
+#define is_chained_kmem_cache(x) 1
+#endif
+
#ifdef CONFIG_SMP
/*
* Waits for all CPUs to execute func().
*/
int kmem_cache_shrink(kmem_cache_t *cachep)
{
- if (!cachep || in_interrupt())
+ if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep))
BUG();
return __kmem_cache_shrink(cachep);
/* Trim buffer in case of stray trailing data */
origlen = skb->len;
- skb_trim(skb, min(unsigned int, skb->len, ddphv.deh_len));
+ skb_trim(skb, min_t(unsigned int, skb->len, ddphv.deh_len));
/*
* Size check to see if ddp->deh_len was crap
}
/* Fix up skb->len field */
- skb_trim(skb, min(unsigned int, origlen, rt->dev->hard_header_len +
+ skb_trim(skb, min_t(unsigned int, origlen, rt->dev->hard_header_len +
ddp_dl->header_length + ddphv.deh_len));
/* Mend the byte order */
return -EFAULT;
valptr = (void *) &val;
- length = min(unsigned int, maxlen, sizeof(int));
+ length = min_t(unsigned int, maxlen, sizeof(int));
switch (optname) {
case AX25_WINDOW:
if (ax25_dev != NULL && ax25_dev->dev != NULL) {
strncpy(devname, ax25_dev->dev->name, IFNAMSIZ);
- length = min(unsigned int, strlen(ax25_dev->dev->name)+1, maxlen);
+ length = min_t(unsigned int, strlen(ax25_dev->dev->name)+1, maxlen);
devname[length-1] = '\0';
} else {
*devname = '\0';
{
if(iov->iov_len)
{
- int copy = min(unsigned int, iov->iov_len, len);
+ int copy = min_t(unsigned int, iov->iov_len, len);
if (copy_to_user(iov->iov_base, kdata, copy))
goto out;
kdata+=copy;
{
if(iov->iov_len)
{
- int copy = min(unsigned int, iov->iov_len, len);
+ int copy = min_t(unsigned int, iov->iov_len, len);
memcpy(iov->iov_base, kdata, copy);
kdata+=copy;
len-=copy;
{
if(iov->iov_len)
{
- int copy = min(unsigned int, len, iov->iov_len);
+ int copy = min_t(unsigned int, len, iov->iov_len);
if (copy_from_user(kdata, iov->iov_base, copy))
goto out;
len-=copy;
while (len > 0)
{
u8 *base = iov->iov_base + offset;
- int copy = min(unsigned int, len, iov->iov_len - offset);
+ int copy = min_t(unsigned int, len, iov->iov_len - offset);
offset = 0;
if (copy_from_user(kdata, base, copy))
while (len > 0)
{
u8 *base = iov->iov_base + offset;
- int copy = min(unsigned int, len, iov->iov_len - offset);
+ int copy = min_t(unsigned int, len, iov->iov_len - offset);
offset = 0;
/* There is a remnant from previous iov. */
ip_options_undo(opt);
- len = min(unsigned int, len, opt->optlen);
+ len = min_t(unsigned int, len, opt->optlen);
if(put_user(len, optlen))
return -EFAULT;
if(copy_to_user(optval, opt->__data, len))
case IP_MULTICAST_IF:
{
struct in_addr addr;
- len = min(unsigned int, len, sizeof(struct in_addr));
+ len = min_t(unsigned int, len, sizeof(struct in_addr));
addr.s_addr = sk->protinfo.af_inet.mc_addr;
release_sock(sk);
if(copy_to_user(optval,&ucval,1))
return -EFAULT;
} else {
- len = min(unsigned int, sizeof(int), len);
+ len = min_t(unsigned int, sizeof(int), len);
if(put_user(len, optlen))
return -EFAULT;
if(copy_to_user(optval,&val,len))
if (get_user(olr, optlen))
return -EFAULT;
- olr = min(unsigned int, olr, sizeof(int));
+ olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
}
if (f->ipfw.fw_flg & IP_FW_F_NETLINK) {
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
- size_t len = min(unsigned int, f->ipfw.fw_outputsize, ntohs(ip->tot_len))
+ size_t len = min_t(unsigned int, f->ipfw.fw_outputsize, ntohs(ip->tot_len))
+ sizeof(__u32) + sizeof(skb->nfmark) + IFNAMSIZ;
struct sk_buff *outskb=alloc_skb(len, GFP_ATOMIC);
struct sk_buff *skb=alloc_skb(128, GFP_ATOMIC);
if(skb)
{
- int len = min(unsigned int,
+ int len = min_t(unsigned int,
128, ntohs(ip->tot_len));
skb_put(skb,len);
equilibrium = ipv4_dst_ops.gc_thresh;
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
if (goal > 0) {
- equilibrium += min(unsigned int, goal / 2, rt_hash_mask + 1);
+ equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
}
} else {
/* We are in dangerous area. Try to reduce cache really
* aggressively.
*/
- goal = max(unsigned int, goal / 2, rt_hash_mask + 1);
+ goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
}
if (rt->u.dst.pmtu > IP_MAX_MTU)
rt->u.dst.pmtu = IP_MAX_MTU;
if (rt->u.dst.advmss == 0)
- rt->u.dst.advmss = max(unsigned int, rt->u.dst.dev->mtu - 40,
+ rt->u.dst.advmss = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
ip_rt_min_advmss);
if (rt->u.dst.advmss > 65535 - 40)
rt->u.dst.advmss = 65535 - 40;
page = pages[poffset/PAGE_SIZE];
offset = poffset % PAGE_SIZE;
- size = min(unsigned int, psize, PAGE_SIZE-offset);
+ size = min_t(unsigned int, psize, PAGE_SIZE-offset);
if (tp->send_head==NULL || (copy = mss_now - skb->len) <= 0) {
new_segment:
if(get_user(len,optlen))
return -EFAULT;
- len = min(unsigned int, len, sizeof(int));
+ len = min_t(unsigned int, len, sizeof(int));
if(len < 0)
return -EINVAL;
info.tcpi_advmss = tp->advmss;
info.tcpi_reordering = tp->reordering;
- len = min(unsigned int, len, sizeof(info));
+ len = min_t(unsigned int, len, sizeof(info));
if(put_user(len, optlen))
return -EFAULT;
if(copy_to_user(optval, &info,len))
if (quickacks==0)
quickacks=2;
if (quickacks > tp->ack.quick)
- tp->ack.quick = min(unsigned int, quickacks, TCP_MAX_QUICKACKS);
+ tp->ack.quick = min_t(unsigned int, quickacks, TCP_MAX_QUICKACKS);
}
void tcp_enter_quickack_mode(struct tcp_opt *tp)
int sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
if (sk->sndbuf < 3*sndmem)
- sk->sndbuf = min(int, 3*sndmem, sysctl_tcp_wmem[2]);
+ sk->sndbuf = min_t(int, 3*sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
incr = __tcp_grow_window(sk, tp, skb);
if (incr) {
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
tp->ack.quick |= 1;
}
}
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
if (sk->rcvbuf < 4*rcvmem)
- sk->rcvbuf = min(int, 4*rcvmem, sysctl_tcp_rmem[2]);
+ sk->rcvbuf = min_t(int, 4*rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is done immediately after connection enters
tp->window_clamp = maxwin;
if (sysctl_tcp_app_win && maxwin>4*tp->advmss)
- tp->window_clamp = max(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
+ tp->window_clamp = max_t(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
}
/* Force reservation of one segment. */
if (sysctl_tcp_app_win &&
tp->window_clamp > 2*tp->advmss &&
tp->window_clamp + tp->advmss > maxwin)
- tp->window_clamp = max(u32, 2*tp->advmss, maxwin-tp->advmss);
+ tp->window_clamp = max_t(u32, 2*tp->advmss, maxwin-tp->advmss);
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, tp->window_clamp);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
!(sk->userlocks&SOCK_RCVBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- sk->rcvbuf = min(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+ sk->rcvbuf = min_t(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
}
if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
app_win += ofo_win;
app_win >>= 1;
if (app_win > tp->ack.rcv_mss)
app_win -= tp->ack.rcv_mss;
- app_win = max(unsigned int, app_win, 2*tp->advmss);
+ app_win = max_t(unsigned int, app_win, 2*tp->advmss);
if (!ofo_win)
- tp->window_clamp = min(u32, tp->window_clamp, app_win);
- tp->rcv_ssthresh = min(u32, tp->window_clamp, 2*tp->advmss);
+ tp->window_clamp = min_t(u32, tp->window_clamp, app_win);
+ tp->rcv_ssthresh = min_t(u32, tp->window_clamp, 2*tp->advmss);
}
}
/* no previous measure. */
tp->srtt = m<<3; /* take the measured time to be rtt */
tp->mdev = m<<2; /* make sure rto = 3*rtt */
- tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
tp->rtt_seq = tp->snd_nxt;
}
}
tp->ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
- dst->ssthresh = max(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
+ dst->ssthresh = max_t(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
if (!(dst->mxlock&(1<<RTAX_CWND)))
dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
} else {
else if (cwnd > tp->snd_ssthresh)
cwnd = tp->snd_ssthresh;
- return min(u32, cwnd, tp->snd_cwnd_clamp);
+ return min_t(u32, cwnd, tp->snd_cwnd_clamp);
}
/* Initialize metrics on socket. */
tp->srtt = dst->rtt;
if (dst->rttvar > tp->mdev) {
tp->mdev = dst->rttvar;
- tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
}
tcp_set_rto(tp);
tcp_bound_rto(tp);
static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
{
if (metric > tp->reordering) {
- tp->reordering = min(unsigned int, TCP_MAX_REORDERING, metric);
+ tp->reordering = min_t(unsigned int, TCP_MAX_REORDERING, metric);
/* This exciting event is worth to be remembered. 8) */
if (ts)
if (sacked&TCPCB_RETRANS) {
if ((dup_sack && in_sack) &&
(sacked&TCPCB_SACKED_ACKED))
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
} else {
/* If it was in a hole, we detected reordering. */
if (fack_count < prior_fackets &&
!(sacked&TCPCB_SACKED_ACKED))
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
}
/* Nothing to do; acked frame is about to be dropped. */
*/
if (!(sacked & TCPCB_RETRANS) &&
fack_count < prior_fackets)
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
if (sacked & TCPCB_LOST) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
tp->fackets_out = fack_count;
} else {
if (dup_sack && (sacked&TCPCB_RETRANS))
- reord = min(int, fack_count, reord);
+ reord = min_t(int, fack_count, reord);
}
/* D-SACK. We can detect redundant retransmission
}
tcp_sync_left_out(tp);
- tp->reordering = min(unsigned int, tp->reordering, sysctl_tcp_reordering);
+ tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering);
tp->ca_state = TCP_CA_Loss;
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
* recovery more?
*/
if (tp->packets_out <= tp->reordering &&
- tp->sacked_out >= max(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
+ tp->sacked_out >= max_t(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
!tcp_may_send_now(sk, tp)) {
/* We have nothing to send. This connection is limited
* either by receiver window or by application.
*/
static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
{
- u32 holes = min(unsigned int,
- max(unsigned int, tp->lost_out, 1),
+ u32 holes = min_t(unsigned int,
+ max_t(unsigned int, tp->lost_out, 1),
tp->packets_out);
if (tp->sacked_out + holes > tp->packets_out) {
*/
static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
{
- tp->snd_cwnd = min(u32, tp->snd_cwnd,
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd,
tcp_packets_in_flight(tp)+tcp_max_burst(tp));
tp->snd_cwnd_stamp = tcp_time_stamp;
}
if (decr && tp->snd_cwnd > tp->snd_ssthresh/2)
tp->snd_cwnd -= decr;
- tp->snd_cwnd = min(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
{
if (tp->prior_ssthresh) {
- tp->snd_cwnd = max(unsigned int,
+ tp->snd_cwnd = max_t(unsigned int,
tp->snd_cwnd, tp->snd_ssthresh<<1);
if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
TCP_ECN_withdraw_cwr(tp);
}
} else {
- tp->snd_cwnd = max(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
+ tp->snd_cwnd = max_t(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
}
tcp_moderate_cwnd(tp);
tp->snd_cwnd_stamp = tcp_time_stamp;
static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
{
- tp->snd_cwnd = min(u32, tp->snd_cwnd, tp->snd_ssthresh);
+ tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_ssthresh);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
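
The explicit type argument is not cosmetic: C's usual
arithmetic conversions turn a signed operand into an unsigned
one when the two are compared, which can invert the result.
A small self-contained illustration (not taken from the
patch, reusing the min_t() sketch above):

--------------------------------------------------------------
#include <stdio.h>

#define min_t(type,x,y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })

int main(void)
{
	int a = -1;
	unsigned int b = 1;

	/* A plain (a < b) converts a to unsigned, so -1 becomes
	 * UINT_MAX and compares as the *larger* value. */
	printf("%d\n", min_t(int, a, b));          /* prints -1 */
	printf("%u\n", min_t(unsigned int, a, b)); /* prints 1  */
	return 0;
}
--------------------------------------------------------------
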
*/
} else {
tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
- min(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
+ min_t(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
}
}
tp->dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
}
}
* Decrease num_sacks.
*/
tp->num_sacks--;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
for(i=this_sack; i < tp->num_sacks; i++)
sp[i] = sp[i+1];
continue;
sp->start_seq = seq;
sp->end_seq = end_seq;
tp->num_sacks++;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
}
/* RCV.NXT advances, some SACKs should be eaten. */
}
if (num_sacks != tp->num_sacks) {
tp->num_sacks = num_sacks;
- tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
}
}
if (tp->dsack) {
tp->dsack = 0;
- tp->eff_sacks = min(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
+ tp->eff_sacks = min_t(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
}
/* Queue data for delivery to the user.
tp->ucopy.len &&
sk->lock.users &&
!tp->urg_data) {
- int chunk = min(unsigned int, skb->len, tp->ucopy.len);
+ int chunk = min_t(unsigned int, skb->len, tp->ucopy.len);
__set_current_state(TASK_RUNNING);
if (offset < 0) BUG();
if (size > 0) {
- size = min(int, copy, size);
+ size = min_t(int, copy, size);
if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
BUG();
TCP_SKB_CB(nskb)->end_seq += size;
if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
tcp_clamp_window(sk, tp);
else if (tcp_memory_pressure)
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, 4*tp->advmss);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, 4*tp->advmss);
tcp_collapse_ofo_queue(sk);
tcp_collapse(sk, sk->receive_queue.next,
if (tp->ca_state == TCP_CA_Open &&
sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
/* Limited by application or receiver window. */
- u32 win_used = max(u32, tp->snd_cwnd_used, 2);
+ u32 win_used = max_t(u32, tp->snd_cwnd_used, 2);
if (win_used < tp->snd_cwnd) {
tp->snd_ssthresh = tcp_current_ssthresh(tp);
tp->snd_cwnd = (tp->snd_cwnd+win_used)>>1;
int sndmem, demanded;
sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
- demanded = max(unsigned int, tp->snd_cwnd, tp->reordering+1);
+ demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering+1);
sndmem *= 2*demanded;
if (sndmem > sk->sndbuf)
- sk->sndbuf = min(int, sndmem, sysctl_tcp_wmem[2]);
+ sk->sndbuf = min_t(int, sndmem, sysctl_tcp_wmem[2]);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
if (tp->wscale_ok == 0) {
tp->snd_wscale = tp->rcv_wscale = 0;
- tp->window_clamp = min(u32, tp->window_clamp, 65535);
+ tp->window_clamp = min_t(u32, tp->window_clamp, 65535);
}
if (tp->saw_tstamp) {
newtp->rcv_wscale = req->rcv_wscale;
} else {
newtp->snd_wscale = newtp->rcv_wscale = 0;
- newtp->window_clamp = min(u32, newtp->window_clamp, 65535);
+ newtp->window_clamp = min_t(u32, newtp->window_clamp, 65535);
}
newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
newtp->max_window = newtp->snd_wnd;
u32 cwnd = tp->snd_cwnd;
tp->snd_ssthresh = tcp_current_ssthresh(tp);
- restart_cwnd = min(u32, restart_cwnd, cwnd);
+ restart_cwnd = min_t(u32, restart_cwnd, cwnd);
while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
cwnd >>= 1;
- tp->snd_cwnd = max(u32, cwnd, restart_cwnd);
+ tp->snd_cwnd = max_t(u32, cwnd, restart_cwnd);
tp->snd_cwnd_stamp = tcp_time_stamp;
tp->snd_cwnd_used = 0;
}
/* Bound mss with half of window */
if (tp->max_window && mss_now > (tp->max_window>>1))
- mss_now = max(u32, (tp->max_window>>1), 68 - tp->tcp_header_len);
+ mss_now = max_t(u32, (tp->max_window>>1), 68 - tp->tcp_header_len);
/* And store cached results */
tp->pmtu_cookie = pmtu;
*/
int mss = tp->ack.rcv_mss;
int free_space = tcp_space(sk);
- int full_space = min(unsigned int, tp->window_clamp, tcp_full_space(sk));
+ int full_space = min_t(unsigned int, tp->window_clamp, tcp_full_space(sk));
int window;
if (mss > full_space)
tp->ack.quick = 0;
if (tcp_memory_pressure)
- tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, 4*tp->advmss);
+ tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, 4*tp->advmss);
if (free_space < mss)
return 0;
/* Do not send more than we queued. 1/4 is reserved for possible
* copying overhead: fragmentation, tunneling, mangling etc.
*/
- if (atomic_read(&sk->wmem_alloc) > min(int, sk->wmem_queued+(sk->wmem_queued>>2),sk->sndbuf))
+ if (atomic_read(&sk->wmem_alloc) > min_t(int, sk->wmem_queued+(sk->wmem_queued>>2),sk->sndbuf))
return -EAGAIN;
/* If receiver has shrunk his window, and skb is out of
* directly.
*/
if (tp->srtt) {
- int rtt = max(unsigned int, tp->srtt>>3, TCP_DELACK_MIN);
+ int rtt = max_t(unsigned int, tp->srtt>>3, TCP_DELACK_MIN);
if (rtt < max_ato)
max_ato = rtt;
}
- ato = min(int, ato, max_ato);
+ ato = min_t(int, ato, max_ato);
}
/* Stay within the limit we were given */
*/
if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb->len > mss) {
- seg_size = min(int, seg_size, mss);
+ seg_size = min_t(int, seg_size, mss);
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
if (tcp_fragment(sk, skb, seg_size))
return -1;
tp->backoff++;
tp->probes_out++;
tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
- min(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
+ min_t(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember probes_out.
if (!tp->probes_out)
tp->probes_out=1;
tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
- min(unsigned int, tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
+ min_t(unsigned int, tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
}
}
if (tcp_ack_scheduled(tp)) {
if (!tp->ack.pingpong) {
/* Delayed ACK missed: inflate ATO. */
- tp->ack.ato = min(u32, tp->ack.ato << 1, tp->rto);
+ tp->ack.ato = min_t(u32, tp->ack.ato << 1, tp->rto);
} else {
/* Delayed ACK missed: leave pingpong mode and
* deflate ATO.
if (!tp->retransmits)
tp->retransmits=1;
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
- min(u32, tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
+ min_t(u32, tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
goto out;
}
tp->retransmits++;
out_reset_timer:
- tp->rto = min(u32, tp->rto << 1, TCP_RTO_MAX);
+ tp->rto = min_t(u32, tp->rto << 1, TCP_RTO_MAX);
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
if (tp->retransmits > sysctl_tcp_retries1)
__sk_dst_reset(sk);
if (req->retrans++ == 0)
lopt->qlen_young--;
- timeo = min(unsigned long,
+ timeo = min_t(unsigned long,
(TCP_TIMEOUT_INIT << req->retrans),
TCP_RTO_MAX);
req->expires = now + timeo;
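
Several of the sites above use the same capped-doubling idiom
for retransmit and probe timers: the timeout doubles with each
backoff step, and min_t() clamps it to a fixed ceiling so a
long run of failures cannot push it to absurd values. A hedged
sketch of the pattern (the helper name and RTO_MAX stand-in
are hypothetical):

--------------------------------------------------------------
/* Hypothetical helper: double the base timeout per retry,
 * but never exceed the RTO_MAX ceiling. Assumes retries is
 * already bounded by the caller, as the kernel sites ensure. */
static unsigned long backoff_timeout(unsigned long base,
				     unsigned int retries)
{
	return min_t(unsigned long, base << retries, RTO_MAX);
}
--------------------------------------------------------------

With base = TCP_TIMEOUT_INIT this is exactly the shape of the
req->expires computation above.
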
msg.daddr = &hdr->saddr;
len = skb->len - msg.offset + sizeof(struct icmp6hdr);
- len = min(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr));
+ len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr));
if (len < 0) {
if (net_ratelimit())
return -EINVAL;
#endif
}
- len = min(unsigned int, sizeof(int), len);
+ len = min_t(unsigned int, sizeof(int), len);
if(put_user(len, optlen))
return -EFAULT;
if(copy_to_user(optval,&val,len))
}
}
- rd_len = min(unsigned int,
+ rd_len = min_t(unsigned int,
IPV6_MIN_MTU-sizeof(struct ipv6hdr)-len, skb->len + 8);
rd_len &= ~0x7;
len += rd_len;
return -ENOPROTOOPT;
}
- len = min(unsigned int, sizeof(int), len);
+ len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
install_route:
rt->u.dst.pmtu = ipv6_get_mtu(dev);
- rt->u.dst.advmss = max(unsigned int, rt->u.dst.pmtu - 60, ip6_rt_min_advmss);
+ rt->u.dst.advmss = max_t(unsigned int, rt->u.dst.pmtu - 60, ip6_rt_min_advmss);
/* Maximal non-jumbo IPv6 payload is 65535 and corresponding
MSS is 65535 - tcp_header_size. 65535 is also valid and
means: "any MSS, rely only on pmtu discovery"
nrt->rt6i_nexthop = neigh_clone(neigh);
/* Reset pmtu, it may be better */
nrt->u.dst.pmtu = ipv6_get_mtu(neigh->dev);
- nrt->u.dst.advmss = max(unsigned int, nrt->u.dst.pmtu - 60, ip6_rt_min_advmss);
+ nrt->u.dst.advmss = max_t(unsigned int, nrt->u.dst.pmtu - 60, ip6_rt_min_advmss);
if (rt->u.dst.advmss > 65535-20)
rt->u.dst.advmss = 65535;
nrt->rt6i_hoplimit = ipv6_get_hoplimit(neigh->dev);
rt->u.dst.output = ip6_output;
rt->rt6i_dev = dev_get_by_name("lo");
rt->u.dst.pmtu = ipv6_get_mtu(rt->rt6i_dev);
- rt->u.dst.advmss = max(unsigned int, rt->u.dst.pmtu - 60, ip6_rt_min_advmss);
+ rt->u.dst.advmss = max_t(unsigned int, rt->u.dst.pmtu - 60, ip6_rt_min_advmss);
if (rt->u.dst.advmss > 65535-20)
rt->u.dst.advmss = 65535;
rt->rt6i_hoplimit = ipv6_get_hoplimit(rt->rt6i_dev);
rt->u.dst.pmtu > arg->mtu &&
!(rt->u.dst.mxlock&(1<<RTAX_MTU)))
rt->u.dst.pmtu = arg->mtu;
- rt->u.dst.advmss = max(unsigned int, arg->mtu - 60, ip6_rt_min_advmss);
+ rt->u.dst.advmss = max_t(unsigned int, arg->mtu - 60, ip6_rt_min_advmss);
if (rt->u.dst.advmss > 65535-20)
rt->u.dst.advmss = 65535;
return 0;
if (get_user(len, optlen))
goto out;
- len = min(unsigned int, len, sizeof(int));
+ len = min_t(unsigned int, len, sizeof(int));
ret = -EINVAL;
if(len < 0)
goto out;
continue;
}
- chunk = min(unsigned int, skb->len, size);
+ chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->receive_queue, skb);
if (copied == 0)
Space = sock_wspace(CurrentRequest->sock->sk);
- ReadSize = min(int, 4 * 4096, CurrentRequest->FileLength - CurrentRequest->BytesSent);
- ReadSize = min(int, ReadSize, Space);
+ ReadSize = min_t(int, 4 * 4096, CurrentRequest->FileLength - CurrentRequest->BytesSent);
+ ReadSize = min_t(int, ReadSize, Space);
if (ReadSize>0)
{
strncpy(Head->FileName,sysctl_khttpd_docroot,sizeof(Head->FileName));
PrefixLen = strlen(sysctl_khttpd_docroot);
- Head->FileNameLength = min(unsigned int, 255, tmp - Buffer + PrefixLen);
+ Head->FileNameLength = min_t(unsigned int, 255, tmp - Buffer + PrefixLen);
- strncat(Head->FileName,Buffer,min(unsigned int, 255 - PrefixLen, tmp - Buffer));
+ strncat(Head->FileName,Buffer,min_t(unsigned int, 255 - PrefixLen, tmp - Buffer));
Buffer=EOL+1;
#ifdef BENCHMARK
{
Buffer+=19;
- strncpy(Head->IMS,Buffer,min(unsigned int, 127,EOL-Buffer-1));
+ strncpy(Head->IMS,Buffer,min_t(unsigned int, 127,EOL-Buffer-1));
Buffer=EOL+1;
continue;
{
Buffer+=12;
- strncpy(Head->Agent,Buffer,min(unsigned int, 127,EOL-Buffer-1));
+ strncpy(Head->Agent,Buffer,min_t(unsigned int, 127,EOL-Buffer-1));
Buffer=EOL+1;
continue;
{
Buffer+=6;
- strncpy(Head->Host,Buffer,min(unsigned int, 127,EOL-Buffer-1));
+ strncpy(Head->Host,Buffer,min_t(unsigned int, 127,EOL-Buffer-1));
Buffer=EOL+1;
continue;
Request->Time = Request->filp->f_dentry->d_inode->i_mtime;
Request->IMS_Time = mimeTime_to_UnixTime(Request->IMS);
sprintf(Request->LengthS,"%i",Request->FileLength);
- time_Unix2RFC(min(unsigned int, Request->Time,CurrentTime_i),Request->TimeS);
+ time_Unix2RFC(min_t(unsigned int, Request->Time,CurrentTime_i),Request->TimeS);
/* The min() is required by rfc1945, section 10.10:
It is not allowed to send a filetime in the future */
return -ENOPROTOOPT;
}
- len = min(unsigned int, len, sizeof(int));
+ len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return -ENOPROTOOPT;
}
- len = min(unsigned int, len, sizeof(int));
+ len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
}
if (!netif_queue_stopped(sch->dev)) {
- long delay = PSCHED_US2JIFFIE(max(long, -toks, -ptoks));
+ long delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
if (delay == 0)
delay = 1;
if (xprt->tcp_copied >= sizeof(xprt->tcp_xid) || !avail)
goto done;
- want = min(unsigned int, sizeof(xprt->tcp_xid) - xprt->tcp_copied, avail);
+ want = min_t(unsigned int, sizeof(xprt->tcp_xid) - xprt->tcp_copied, avail);
do {
dprintk("RPC: reading xid (%d bytes)\n", want);
riov.iov_base = ((u8*) &xprt->tcp_xid) + xprt->tcp_copied;
if (req->rq_rlen <= xprt->tcp_copied || !avail)
goto done;
- want = min(unsigned int, req->rq_rlen - xprt->tcp_copied, avail);
+ want = min_t(unsigned int, req->rq_rlen - xprt->tcp_copied, avail);
do {
dprintk("RPC: %4d TCP receiving %d bytes\n",
req->rq_task->tk_pid, want);
int want, result = 0;
while (avail) {
- want = min(unsigned int, avail, sizeof(dummy));
+ want = min_t(unsigned int, avail, sizeof(dummy));
riov.iov_base = dummy;
riov.iov_len = want;
dprintk("RPC: TCP skipping %d bytes\n", want);
/* Wait until we have enough socket memory */
- if (sock_wspace(sk) < min(int, sk->sndbuf,XPRT_MIN_WRITE_SPACE))
+ if (sock_wspace(sk) < min_t(int, sk->sndbuf,XPRT_MIN_WRITE_SPACE))
return;
if (!xprt_test_and_set_wspace(xprt)) {
* fallback size buffer which is under a page and will
* succeed. [Alan]
*/
- size = min(int, size, skb_tailroom(skb));
+ size = min_t(int, size, skb_tailroom(skb));
memcpy(UNIXCREDS(skb), &scm->creds, sizeof(struct ucred));
if (scm->fp)
sunaddr = NULL;
}
- chunk = min(unsigned int, skb->len, size);
+ chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->receive_queue, skb);
if (copied == 0)
pos = dent->get_info(page, dent->data, 0, 0);
offs = file->f_pos;
if (offs < pos) {
- len = min(unsigned int, pos - offs, count);
+ len = min_t(unsigned int, pos - offs, count);
if (copy_to_user(buf, (page + offs), len)) {
kfree(page);
return -EFAULT;
pos = dent->get_info(page, dent->data, 0, 0, 0);
offs = file->f_pos;
if (offs < pos) {
- len = min(unsigned int, pos - offs, count);
+ len = min_t(unsigned int, pos - offs, count);
if (copy_to_user(buf, (page + offs), len)) {
kfree(page);
return -EFAULT;
pos = dent->get_info(page, dent->data, 0, 0, 0);
offs = file->f_pos;
if (offs < pos) {
- len = min(unsigned int, pos - offs, count);
+ len = min_t(unsigned int, pos - offs, count);
memcpy_tofs((void*)buf, (void*)(page + offs), len);
file->f_pos += len;
}
return -ENOPROTOOPT;
}
- len = min(unsigned int, len, sizeof(int));
+ len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;