diff options
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/jump_label.c      |  14
-rw-r--r--  kernel/power/snapshot.c  |   6
-rw-r--r--  kernel/resource.c        | 116
-rw-r--r--  kernel/sched.c           |   9
4 files changed, 115 insertions, 30 deletions
| diff --git a/kernel/jump_label.c b/kernel/jump_label.c index fa27e750dbc0..a8ce45097f3d 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)  static void jump_label_update(struct jump_label_key *key, int enable)  { -	struct jump_entry *entry = key->entries; - -	/* if there are no users, entry can be NULL */ -	if (entry) -		__jump_label_update(key, entry, __stop___jump_table, enable); +	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;  #ifdef CONFIG_MODULES +	struct module *mod = __module_address((jump_label_t)key); +  	__jump_label_mod_update(key, enable); + +	if (mod) +		stop = mod->jump_entries + mod->num_jump_entries;  #endif +	/* if there are no users, entry can be NULL */ +	if (entry) +		__jump_label_update(key, entry, stop, enable);  }  #endif diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index ace55889f702..06efa54f93d6 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)  		to_free_highmem = alloc_highmem - save;  	} else {  		to_free_highmem = 0; -		to_free_normal -= save - alloc_highmem; +		save -= alloc_highmem; +		if (to_free_normal > save) +			to_free_normal -= save; +		else +			to_free_normal = 0;  	}  	memory_bm_position_reset(©_bm); diff --git a/kernel/resource.c b/kernel/resource.c index 798e2fae2a06..3ff40178dce7 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -38,6 +38,14 @@ struct resource iomem_resource = {  };  EXPORT_SYMBOL(iomem_resource); +/* constraints to be met while allocating resources */ +struct resource_constraint { +	resource_size_t min, max, align; +	resource_size_t (*alignf)(void *, const struct resource *, +			resource_size_t, resource_size_t); +	void *alignf_data; +}; +  static DEFINE_RWLOCK(resource_lock);  static void *r_next(struct seq_file *m, void *v, loff_t *pos) @@ -384,16 +392,13 @@ static bool 
resource_contains(struct resource *res1, struct resource *res2)  }  /* - * Find empty slot in the resource tree given range and alignment. + * Find empty slot in the resource tree with the given range and + * alignment constraints   */ -static int find_resource(struct resource *root, struct resource *new, -			 resource_size_t size, resource_size_t min, -			 resource_size_t max, resource_size_t align, -			 resource_size_t (*alignf)(void *, -						   const struct resource *, -						   resource_size_t, -						   resource_size_t), -			 void *alignf_data) +static int __find_resource(struct resource *root, struct resource *old, +			 struct resource *new, +			 resource_size_t  size, +			 struct resource_constraint *constraint)  {  	struct resource *this = root->child;  	struct resource tmp = *new, avail, alloc; @@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new,  	 * Skip past an allocated resource that starts at 0, since the assignment  	 * of this->start - 1 to tmp->end below would cause an underflow.  	 */ -	if (this && this->start == 0) { -		tmp.start = this->end + 1; +	if (this && this->start == root->start) { +		tmp.start = (this == old) ? old->start : this->end + 1;  		this = this->sibling;  	}  	for(;;) {  		if (this) -			tmp.end = this->start - 1; +			tmp.end = (this == old) ?  
this->end : this->start - 1;  		else  			tmp.end = root->end; -		resource_clip(&tmp, min, max); +		resource_clip(&tmp, constraint->min, constraint->max);  		arch_remove_reservations(&tmp);  		/* Check for overflow after ALIGN() */  		avail = *new; -		avail.start = ALIGN(tmp.start, align); +		avail.start = ALIGN(tmp.start, constraint->align);  		avail.end = tmp.end;  		if (avail.start >= tmp.start) { -			alloc.start = alignf(alignf_data, &avail, size, align); +			alloc.start = constraint->alignf(constraint->alignf_data, &avail, +					size, constraint->align);  			alloc.end = alloc.start + size - 1;  			if (resource_contains(&avail, &alloc)) {  				new->start = alloc.start; @@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new,  		}  		if (!this)  			break; -		tmp.start = this->end + 1; +		if (this != old) +			tmp.start = this->end + 1;  		this = this->sibling;  	}  	return -EBUSY;  } +/* + * Find empty slot in the resource tree given range and alignment. + */ +static int find_resource(struct resource *root, struct resource *new, +			resource_size_t size, +			struct resource_constraint  *constraint) +{ +	return  __find_resource(root, NULL, new, size, constraint); +} +  /** - * allocate_resource - allocate empty slot in the resource tree given range & alignment + * reallocate_resource - allocate a slot in the resource tree given range & alignment. + *	The resource will be relocated if the new size cannot be reallocated in the + *	current location. + * + * @root: root resource descriptor + * @old:  resource descriptor desired by caller + * @newsize: new size of the resource descriptor + * @constraint: the size and alignment constraints to be met. 
+ */ +int reallocate_resource(struct resource *root, struct resource *old, +			resource_size_t newsize, +			struct resource_constraint  *constraint) +{ +	int err=0; +	struct resource new = *old; +	struct resource *conflict; + +	write_lock(&resource_lock); + +	if ((err = __find_resource(root, old, &new, newsize, constraint))) +		goto out; + +	if (resource_contains(&new, old)) { +		old->start = new.start; +		old->end = new.end; +		goto out; +	} + +	if (old->child) { +		err = -EBUSY; +		goto out; +	} + +	if (resource_contains(old, &new)) { +		old->start = new.start; +		old->end = new.end; +	} else { +		__release_resource(old); +		*old = new; +		conflict = __request_resource(root, old); +		BUG_ON(conflict); +	} +out: +	write_unlock(&resource_lock); +	return err; +} + + +/** + * allocate_resource - allocate empty slot in the resource tree given range & alignment. + * 	The resource will be reallocated with a new size if it was already allocated   * @root: root resource descriptor   * @new: resource descriptor desired by caller   * @size: requested resource region size @@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new,  		      void *alignf_data)  {  	int err; +	struct resource_constraint constraint;  	if (!alignf)  		alignf = simple_align_resource; +	constraint.min = min; +	constraint.max = max; +	constraint.align = align; +	constraint.alignf = alignf; +	constraint.alignf_data = alignf_data; + +	if ( new->parent ) { +		/* resource is already allocated, try reallocating with +		   the new constraints */ +		return reallocate_resource(root, new, size, &constraint); +	} +  	write_lock(&resource_lock); -	err = find_resource(root, new, size, min, max, align, alignf, alignf_data); +	err = find_resource(root, new, size, &constraint);  	if (err >= 0 && __request_resource(root, new))  		err = -EBUSY;  	write_unlock(&resource_lock); diff --git a/kernel/sched.c b/kernel/sched.c index 3f2e502d609b..9769c756ad66 100644 --- a/kernel/sched.c +++ 
b/kernel/sched.c @@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);   * (The default weight is 1024 - so there's no practical   *  limitation from this.)   */ -#define MIN_SHARES	2 -#define MAX_SHARES	(1UL << (18 + SCHED_LOAD_RESOLUTION)) +#define MIN_SHARES	(1UL <<  1) +#define MAX_SHARES	(1UL << 18)  static int root_task_group_load = ROOT_TASK_GROUP_LOAD;  #endif @@ -8450,10 +8450,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)  	if (!tg->se[0])  		return -EINVAL; -	if (shares < MIN_SHARES) -		shares = MIN_SHARES; -	else if (shares > MAX_SHARES) -		shares = MAX_SHARES; +	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));  	mutex_lock(&shares_mutex);  	if (tg->shares == shares) | 
