Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	41
1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ef413492a149..518e2c3f4c75 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -156,24 +156,6 @@ static unsigned long writeout_period_time = 0;
 #define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 
 /*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around.  To avoid stressing page reclaim with lots of unreclaimable
- * pages.  It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-/*
  * In a memory zone, there is a certain amount of pages we consider
  * available for the page cache, which is essentially the number of
  * free and reclaimable pages, minus some zone reserves to protect
@@ -593,14 +575,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
  * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
  *     => fast response on large errors; small oscillation near setpoint
  */
-static inline long long pos_ratio_polynom(unsigned long setpoint,
+static long long pos_ratio_polynom(unsigned long setpoint,
 					  unsigned long dirty,
 					  unsigned long limit)
 {
 	long long pos_ratio;
 	long x;
 
-	x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 		    limit - setpoint + 1);
 	pos_ratio = x;
 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -842,7 +824,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 	x_intercept = bdi_setpoint + span;
 
 	if (bdi_dirty < x_intercept - span / 4) {
-		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+		pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
 				    x_intercept - bdi_setpoint + 1);
 	} else
 		pos_ratio /= 4;
@@ -1623,7 +1605,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * 1000+ tasks, all of them start dirtying pages at exactly the same
 	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	p =  &__get_cpu_var(bdp_ratelimits);
+	p =  this_cpu_ptr(&bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
 	else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1617,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
 	 * the dirty throttling and livelock other long-run dirtiers.
 	 */
-	p = &__get_cpu_var(dirty_throttle_leaks);
+	p = this_cpu_ptr(&dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
 		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
@@ -1682,7 +1664,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 /*
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
-int dirty_writeback_centisecs_handler(ctl_table *table, int write,
+int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
@@ -2398,7 +2380,7 @@ int test_clear_page_writeback(struct page *page)
 	return ret;
 }
 
-int test_set_page_writeback(struct page *page)
+int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
 	int ret;
@@ -2423,9 +2405,10 @@ int test_set_page_writeback(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-		radix_tree_tag_clear(&mapping->page_tree,
-				     page_index(page),
-				     PAGECACHE_TAG_TOWRITE);
+		if (!keep_write)
+			radix_tree_tag_clear(&mapping->page_tree,
+						page_index(page),
+						PAGECACHE_TAG_TOWRITE);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
@@ -2436,7 +2419,7 @@ int test_set_page_writeback(struct page *page)
 	return ret;
 
 }
-EXPORT_SYMBOL(test_set_page_writeback);
+EXPORT_SYMBOL(__test_set_page_writeback);
 
 /*
  * Return true if any of the pages in the mapping are marked with the
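
The two hunks that switch div_s64()/div_u64() to div64_s64()/div64_u64() matter because the former take only a 32-bit divisor: a large "limit - setpoint + 1" (or "x_intercept - bdi_setpoint + 1") can be truncated on 32-bit systems, in the worst case to zero, giving a divide-by-zero. The 64-bit variants keep the divisor at full width. The function itself evaluates a fixed-point cubic, f(dirty) = 1 + ((setpoint - dirty) / (limit - setpoint + 1))^3, scaled by 2^RATELIMIT_CALC_SHIFT. The userspace sketch below reproduces that arithmetic with plain C division standing in for div64_s64(); RATELIMIT_CALC_SHIFT = 10 and the sample numbers are illustrative assumptions, not values taken from this diff.

#include <stdio.h>
#include <stdint.h>

#define RATELIMIT_CALC_SHIFT	10

/*
 * Same cubic as pos_ratio_polynom(): f(dirty) = 1 + x^3 in fixed point,
 * where x = (setpoint - dirty) / (limit - setpoint + 1).  Plain 64-bit
 * division stands in for the kernel's div64_s64(); an arithmetic right
 * shift of negative values is assumed, as the kernel code also assumes.
 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
{
	long long pos_ratio;
	long long x;

	x = (((int64_t)setpoint - (int64_t)dirty) << RATELIMIT_CALC_SHIFT) /
	    (int64_t)(limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	/* the kernel clamps the result to the fixed-point range [0, 2.0] */
	if (pos_ratio < 0)
		pos_ratio = 0;
	if (pos_ratio > (2LL << RATELIMIT_CALC_SHIFT))
		pos_ratio = 2LL << RATELIMIT_CALC_SHIFT;
	return pos_ratio;
}

int main(void)
{
	/* at the setpoint: 1024, i.e. 1.0 - no extra throttling */
	printf("%lld\n", pos_ratio_polynom(800, 800, 1000));
	/* halfway to the limit: roughly 900, i.e. throttle mildly */
	printf("%lld\n", pos_ratio_polynom(800, 900, 1000));
	/* at the hard limit: close to 0, i.e. throttle hard */
	printf("%lld\n", pos_ratio_polynom(800, 1000, 1000));
	return 0;
}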
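The last three hunks rename test_set_page_writeback() to __test_set_page_writeback() and add a keep_write flag: when it is set, the PAGECACHE_TAG_TOWRITE radix-tree tag is left in place, so a tagged data-integrity pass (write_cache_pages() walking TOWRITE-tagged entries) can still find the page. Because this diff is limited to mm/page-writeback.c, the callers and the wrappers that preserve the old name are not visible here; the sketch below outlines wrappers of the kind the corresponding upstream series adds in include/linux/page-flags.h, written from memory rather than copied from this patch (the keepwrite variant was introduced for ext4's ordered-mode data-integrity sync).

/*
 * Sketch of the compatibility wrappers implied by the rename.  Kernel
 * context is assumed; only the declarations needed to read the sketch
 * are spelled out.
 */
#include <linux/types.h>	/* bool */

struct page;
int __test_set_page_writeback(struct page *page, bool keep_write);

/* Ordinary writeback: clear PAGECACHE_TAG_TOWRITE, as before the patch. */
#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)

/*
 * Data-integrity writeback that must leave PAGECACHE_TAG_TOWRITE set so
 * a tagged write_cache_pages() pass can revisit the page, e.g. when only
 * part of the page could be written this time around.
 */
#define test_set_page_writeback_keepwrite(page)		\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}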
