Diffstat (limited to 'mm')
-rw-r--r-- | mm/backing-dev.c  |  2
-rw-r--r-- | mm/bounce.c       |  5
-rw-r--r-- | mm/memory.c       | 70
-rw-r--r-- | mm/mempolicy.c    |  9
-rw-r--r-- | mm/migrate.c      | 68
-rw-r--r-- | mm/mlock.c        | 45
-rw-r--r-- | mm/oom_kill.c     | 12
-rw-r--r-- | mm/page_cgroup.c  |  3
-rw-r--r-- | mm/shmem.c        |  8
-rw-r--r-- | mm/slob.c         |  2
-rw-r--r-- | mm/slub.c         |  2
-rw-r--r-- | mm/swap.c         | 13
-rw-r--r-- | mm/swapfile.c     |  9
-rw-r--r-- | mm/vmalloc.c      |  2
14 files changed, 183 insertions, 67 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 2a56124dbc2..801c08b046e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -176,7 +176,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	int ret = 0;
 	struct device *dev;
 
-	if (WARN_ON(bdi->dev))
+	if (bdi->dev)	/* The driver needs to use separate queues per device */
 		goto exit;
 
 	va_start(args, fmt);
diff --git a/mm/bounce.c b/mm/bounce.c
index 06722c40305..bf0cf7c8387 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <asm/tlbflush.h>
 
 #define POOL_SIZE 64
@@ -21,6 +22,8 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
+DEFINE_TRACE(block_bio_bounce);
+
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {
@@ -222,7 +225,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bio)
 		return;
 
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+	trace_block_bio_bounce(q, *bio_orig);
 
 	/*
 	 * at least one page was bounced, fill in possible non-highmem
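Note: the mm/bounce.c hunk converts an open-coded blktrace call into a tracepoint. As a rough sketch of the pattern (assuming the 2.6.28-era tracepoint API with TPPROTO/TPARGS; the probe below is hypothetical and not part of this commit): a header declares the hook, exactly one compilation unit defines it, the call site invokes the generated trace_*() stub, and consumers attach probes at runtime.

/* In a shared header (here trace/block.h) -- declares the hook: */
DECLARE_TRACE(block_bio_bounce,
	TPPROTO(struct request_queue *q, struct bio *bio),
	TPARGS(q, bio));

/* In exactly one .c file (here mm/bounce.c): */
DEFINE_TRACE(block_bio_bounce);

/* A hypothetical consumer registers a probe matching the prototype: */
static void probe_bio_bounce(struct request_queue *q, struct bio *bio)
{
	/* e.g. count bounced bios */
}
/* ... register_trace_block_bio_bounce(probe_bio_bounce); */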
diff --git a/mm/memory.c b/mm/memory.c
index 164951c4730..f01b7eed6e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (unlikely(is_pfn_mapping(vma))) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +925,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (unlikely(is_pfn_mapping(vma)))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1430,6 +1443,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1444,7 +1458,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
@@ -1575,14 +1597,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (is_cow_mapping(vma->vm_flags)) {
-		if (addr != vma->vm_start || end != vma->vm_end)
-			return -EINVAL;
+	if (addr == vma->vm_start && end == vma->vm_end)
 		vma->vm_pgoff = pfn;
-	}
+	else if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1594,6 +1619,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -2865,9 +2894,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags,
+		unsigned long *prot, resource_size_t *phys)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -2876,24 +2905,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	resource_size_t phys_addr = 0;
 	struct mm_struct *mm = vma->vm_mm;
+	int ret = -EINVAL;
 
-	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto no_page_table;
+		goto out;
 
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto no_page_table;
+		goto out;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto no_page_table;
+		goto out;
 
 	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
 	if (pmd_huge(*pmd))
-		goto no_page_table;
+		goto out;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -2908,13 +2939,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
 	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = phys_addr;
+	ret = 0;
 
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
-	return phys_addr;
-no_page_table:
-	return 0;
+	return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2956,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	void *maddr;
 	int offset = addr & (PAGE_SIZE-1);
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	phys_addr = follow_phys(vma, addr, write, &prot);
-
-	if (!phys_addr)
+	if (follow_phys(vma, addr, write, &prot, &phys_addr))
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
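Note: the follow_phys() rework above changes its contract from "return the physical address, with 0 doubling as failure" to "return 0/-EINVAL and fill an output parameter", so a physical address of 0 is no longer ambiguous. A minimal caller sketch, mirroring the generic_access_phys() hunk:

	resource_size_t phys_addr;
	unsigned long prot;

	/* fails unless the vma is VM_IO/VM_PFNMAP and a pte is present */
	if (follow_phys(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	/* phys_addr and prot are valid here, even if phys_addr == 0 */
	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);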
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e9493b1c111..e412ffa8e52 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1114,6 +1114,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 		const unsigned long __user *old_nodes,
 		const unsigned long __user *new_nodes)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct mm_struct *mm;
 	struct task_struct *task;
 	nodemask_t old;
@@ -1148,12 +1149,16 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	task_nodes = cpuset_mems_allowed(task);
 	/* Is the user allowed to access the target nodes? */
diff --git a/mm/migrate.c b/mm/migrate.c
index 1e0d6b237f4..21631ab8c08 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -987,25 +987,18 @@ out:
 /*
  * Determine the nodes of an array of pages and store it in an array of status.
  */
-static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
-			 const void __user * __user *pages,
-			 int __user *status)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+				const void __user **pages, int *status)
 {
 	unsigned long i;
-	int err;
 
 	down_read(&mm->mmap_sem);
 
 	for (i = 0; i < nr_pages; i++) {
-		const void __user *p;
-		unsigned long addr;
+		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
 		struct page *page;
-
-		err = -EFAULT;
-		if (get_user(p, pages+i))
-			goto out;
-		addr = (unsigned long) p;
+		int err = -EFAULT;
 
 		vma = find_vma(mm, addr);
 		if (!vma)
@@ -1024,12 +1017,52 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 		err = page_to_nid(page);
 set_status:
-		put_user(err, status+i);
+		*status = err;
+
+		pages++;
+		status++;
+	}
+
+	up_read(&mm->mmap_sem);
+}
+
+/*
+ * Determine the nodes of a user array of pages and store it in
+ * a user array of status.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+			 const void __user * __user *pages,
+			 int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+	int err;
+
+	for (i = 0; i < nr_pages; i += chunk_nr) {
+		if (chunk_nr + i > nr_pages)
+			chunk_nr = nr_pages - i;
+
+		err = copy_from_user(chunk_pages, &pages[i],
+				     chunk_nr * sizeof(*chunk_pages));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+		err = copy_to_user(&status[i], chunk_status,
+				   chunk_nr * sizeof(*chunk_status));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
 	}
 
 	err = 0;
 out:
-	up_read(&mm->mmap_sem);
 	return err;
 }
@@ -1042,6 +1075,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 			const int __user *nodes,
 			int __user *status, int flags)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct task_struct *task;
 	struct mm_struct *mm;
 	int err;
@@ -1072,12 +1106,16 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	err = security_task_movememory(task);
 	if (err)
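Note: the reworked do_pages_stat() path is what move_pages(2) takes when no target nodes are passed: the syscall then only reports the node backing each page, and the kernel now pulls the user pointer array in chunks of 16 instead of doing one get_user() per page. A hypothetical userspace query via libnuma's wrapper (buffer names are illustrative):

#include <numaif.h>		/* link with -lnuma */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *buf;
	void *pages[2];
	int status[2];

	if (posix_memalign((void **)&buf, pagesz, 2 * pagesz))
		return 1;
	buf[0] = buf[pagesz] = 1;	/* fault both pages in */
	pages[0] = buf;
	pages[1] = buf + pagesz;

	/* nodes == NULL: query only; status[i] is a node id or -errno */
	if (move_pages(0 /* self */, 2, pages, NULL, status, 0) == 0)
		printf("pages on nodes %d and %d\n", status[0], status[1]);
	return 0;
}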
diff --git a/mm/mlock.c b/mm/mlock.c
index 1ada366570c..3035a56e761 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -667,3 +667,48 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+	unsigned long rlim, vm, pgsz;
+	void *buffer = NULL;
+
+	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->total_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->locked_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
+		goto out;
+
+	current->mm->total_vm += pgsz;
+	current->mm->locked_vm += pgsz;
+
+ out:
+	up_write(&current->mm->mmap_sem);
+	return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	current->mm->total_vm -= pgsz;
+	current->mm->locked_vm -= pgsz;
+
+	up_write(&current->mm->mmap_sem);
+
+	kfree(buffer);
+}
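Note: the two mm/mlock.c helpers are meant to be used as a pair: alloc_locked_buffer() charges the pages against the caller's RLIMIT_AS and RLIMIT_MEMLOCK before kzalloc()ing, and free_locked_buffer() reverses the accounting. A sketch of a hypothetical in-kernel caller:

	void *buf;
	size_t size = 16 * PAGE_SIZE;

	buf = alloc_locked_buffer(size);	/* NULL if either rlimit would be exceeded */
	if (!buf)
		return -ENOMEM;

	/* ... use buf as a per-task pinned buffer ... */

	free_locked_buffer(buf, size);		/* must be passed the same size */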
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a0a01902f55..558f9afe6e4 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -128,8 +128,8 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * Superuser processes are usually more important, so we make it
 	 * less likely that we kill those.
 	 */
-	if (has_capability(p, CAP_SYS_ADMIN) ||
-	    has_capability(p, CAP_SYS_RESOURCE))
+	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
+	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
 		points /= 4;
 
 	/*
@@ -138,7 +138,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * tend to only have this flag set on applications they think
 	 * of as important.
 	 */
-	if (has_capability(p, CAP_SYS_RAWIO))
+	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
 		points /= 4;
 
 	/*
@@ -299,9 +299,9 @@ static void dump_tasks(const struct mem_cgroup *mem)
 
 		task_lock(p);
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-		       p->pid, p->uid, p->tgid, p->mm->total_vm,
-		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
-		       p->comm);
+		       p->pid, __task_cred(p)->uid, p->tgid,
+		       p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
+		       p->oomkilladj, p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 0b3cbf090a6..ab27ff75051 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -49,6 +49,9 @@ static int __init alloc_node_page_cgroup(int nid)
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
 
+	if (!nr_pages)
+		return 0;
+
 	table_size = sizeof(struct page_cgroup) * nr_pages;
 
 	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
diff --git a/mm/shmem.c b/mm/shmem.c
index 0ed075215e5..f1b0d4871f3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1513,8 +1513,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 	inode = new_inode(sb);
 	if (inode) {
 		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
+		inode->i_uid = current_fsuid();
+		inode->i_gid = current_fsgid();
 		inode->i_blocks = 0;
 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -2278,8 +2278,8 @@ static int shmem_fill_super(struct super_block *sb,
 	sbinfo->max_blocks = 0;
 	sbinfo->max_inodes = 0;
 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
-	sbinfo->uid = current->fsuid;
-	sbinfo->gid = current->fsgid;
+	sbinfo->uid = current_fsuid();
+	sbinfo->gid = current_fsgid();
 	sbinfo->mpol = NULL;
 	sb->s_fs_info = sbinfo;
 
diff --git a/mm/slob.c b/mm/slob.c
index cb675d12679..bf7e8fc3aed 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		flags, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
diff --git a/mm/slub.c b/mm/slub.c
index 749588a50a5..a2cd47d89e0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3597,7 +3597,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
 			break;
 		len += sprintf(buf + len, "%7ld ", l->count);
 
diff --git a/mm/swap.c b/mm/swap.c
index 2881987603e..b135ec90cde 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -299,7 +299,6 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
-#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
@@ -313,18 +312,6 @@ int lru_add_drain_all(void)
 	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
-	lru_add_drain();
-	return 0;
-}
-#endif
-
 /*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 90cb67a5417..54a9f87e516 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1462,6 +1462,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+	MAX_SWAPFILES_CHECK();
+	return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f3f6e075856..1ddb77ba399 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1717,7 +1717,7 @@ static int s_show(struct seq_file *m, void *p)
 		v->addr, v->addr + v->size, v->size);
 
 	if (v->caller) {
-		char buff[2 * KSYM_NAME_LEN];
+		char buff[KSYM_SYMBOL_LEN];
 
 		seq_putc(m, ' ');
 		sprint_symbol(buff, (unsigned long)v->caller);
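Note: the mm/slub.c and mm/vmalloc.c hunks fix the same class of bug from opposite directions: sprint_symbol() emits not just a symbol name (KSYM_NAME_LEN) but name plus offset, size and module suffix, so both the destination buffer and any headroom check must use KSYM_SYMBOL_LEN. The safe pattern, sketched:

	#include <linux/kallsyms.h>

	char sym[KSYM_SYMBOL_LEN];	/* worst case: "name+0xoff/0xsize [module]" */

	sprint_symbol(sym, (unsigned long)addr);

	/* when appending into a fixed page-sized buffer, reserve the same worst case */
	if (len < PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
		len += sprintf(buf + len, " %s", sym);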