Diffstat (limited to 'fs/ntfs/compress.c')
-rw-r--r--	fs/ntfs/compress.c	77
1 file changed, 34 insertions(+), 43 deletions(-)
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c35e78..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
-		/*
-		 * FIXME: Using clear_page() will become wrong when we get
-		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
-		 */
+	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
-	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	kp_ofs = initialized_size & ~PAGE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
 	return;
 }
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
 static inline void handle_bounds_compressed_page(struct page *page,
 		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
 			(initialized_size < i_size))
 		zero_partial_compressed_page(page, initialized_size);
 	return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
  * completed during the decompression of the compression block (@cb_start).
  *
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
  * unpredicatbly! You have been warned!
  *
  * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
 				if (di == xpage)
 					*xpage_done = 1;
 				else
-					page_cache_release(dp);
+					put_page(dp);
 				dest_pages[di] = NULL;
 			}
 		}
@@ -274,7 +270,7 @@ return_error:
 		cb = cb_sb_end;
 
 		/* Advance destination position to next sub-block. */
-		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
+		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
 		if (!*dest_ofs && (++*dest_index > dest_max_index))
 			goto return_overflow;
 		goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
 
 		/* Advance destination position to next sub-block. */
 		*dest_ofs += NTFS_SB_SIZE;
-		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
+		if (!(*dest_ofs &= ~PAGE_MASK)) {
 finalize_page:
 			/*
 			 * First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
 			*dest_ofs += nr_bytes;
 		}
 		/* We have finished the current sub-block. */
-		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
+		if (!(*dest_ofs &= ~PAGE_MASK))
 			goto finalize_page;
 		goto do_next_sb;
 	}
@@ -462,7 +458,7 @@ return_overflow:
  * have been written to so that we would lose data if we were to just overwrite
  * them with the out-of-date uncompressed data.
  *
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
  * the end of the file I think. We need to detect this case and zero the out
  * of bounds remainder of the page in question and mark it as handled. At the
  * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
  * clusters so is probably not going to be seen by anyone. Still this should
  * be fixed. (AIA)
  *
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
  * handling sparse and compressed cbs. (AIA)
 *
 * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
 	u64 cb_size_mask = cb_size - 1UL;
 	VCN vcn;
 	LCN lcn;
-	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
-	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
+	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
+	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
 			vol->cluster_size_bits;
 	/*
 	 * The first vcn after the last wanted vcn (minimum alignment is again
-	 * PAGE_CACHE_SIZE.
+	 * PAGE_SIZE.
 	 */
-	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
+	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
 			& ~cb_size_mask) >> vol->cluster_size_bits;
 	/* Number of compression blocks (cbs) in the wanted vcn range. */
 	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
 	 * guarantees of start_vcn and end_vcn, no need to round up here.
 	 */
 	unsigned int nr_pages = (end_vcn - start_vcn) <<
-			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+			vol->cluster_size_bits >> PAGE_SHIFT;
 	unsigned int xpage, max_page, cur_page, cur_ofs, i;
 	unsigned int cb_clusters, cb_max_ofs;
 	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
 	 * We have already been given one page, this is the one we must do.
 	 * Once again, the alignment guarantees keep it simple.
 	 */
-	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
 	xpage = index - offset;
 	pages[xpage] = page;
 	/*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
 	i_size = i_size_read(VFS_I(ni));
 	initialized_size = ni->initialized_size;
 	read_unlock_irqrestore(&ni->size_lock, flags);
-	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
 			offset;
 	/* Is the page fully outside i_size? (truncate in progress) */
 	if (xpage >= max_page) {
 		kfree(bhs);
 		kfree(pages);
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 		ntfs_debug("Compressed read outside i_size - truncated?");
 		SetPageUptodate(page);
 		unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
 				continue;
 			}
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			pages[i] = NULL;
 		}
 	}
@@ -735,9 +731,9 @@ lock_retry_remap:
 	ntfs_debug("Successfully read the compression block.");
 
 	/* The last page and maximum offset within it for the current cb. */
-	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
-	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
-	cb_max_page >>= PAGE_CACHE_SHIFT;
+	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
+	cb_max_ofs = cb_max_page & ~PAGE_MASK;
+	cb_max_page >>= PAGE_SHIFT;
 
 	/* Catch end of file inside a compression block. */
 	if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
 		for (; cur_page < cb_max_page; cur_page++) {
 			page = pages[cur_page];
 			if (page) {
-				/*
-				 * FIXME: Using clear_page() will become wrong
-				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
-				 * for now there is no problem.
-				 */
 				if (likely(!cur_ofs))
 					clear_page(page_address(page));
 				else
 					memset(page_address(page) + cur_ofs, 0,
-							PAGE_CACHE_SIZE -
+							PAGE_SIZE -
 							cur_ofs);
 				flush_dcache_page(page);
 				kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
 				if (cur_page == xpage)
 					xpage_done = 1;
 				else
-					page_cache_release(page);
+					put_page(page);
 				pages[cur_page] = NULL;
 			}
-			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+			cb_pos += PAGE_SIZE - cur_ofs;
 			cur_ofs = 0;
 			if (cb_pos >= cb_end)
 				break;
@@ -807,7 +798,7 @@ lock_retry_remap:
 		 * synchronous io for the majority of pages.
 		 * Or if we choose not to do the read-ahead/-behind stuff, we
 		 * could just return block_read_full_page(pages[xpage]) as long
-		 * as PAGE_CACHE_SIZE <= cb_size.
+		 * as PAGE_SIZE <= cb_size.
 		 */
 		if (cb_max_ofs)
 			cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
 			page = pages[cur_page];
 			if (page)
 				memcpy(page_address(page) + cur_ofs, cb_pos,
-						PAGE_CACHE_SIZE - cur_ofs);
-			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+						PAGE_SIZE - cur_ofs);
+			cb_pos += PAGE_SIZE - cur_ofs;
 			cur_ofs = 0;
 			if (cb_pos >= cb_end)
 				break;
@@ -850,10 +841,10 @@ lock_retry_remap:
 				if (cur2_page == xpage)
 					xpage_done = 1;
 				else
-					page_cache_release(page);
+					put_page(page);
 				pages[cur2_page] = NULL;
 			}
-			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
+			cb_pos2 += PAGE_SIZE - cur_ofs2;
 			cur_ofs2 = 0;
 			if (cb_pos2 >= cb_end)
 				break;
@@ -884,7 +875,7 @@ lock_retry_remap:
 					kunmap(page);
 					unlock_page(page);
 					if (prev_cur_page != xpage)
-						page_cache_release(page);
+						put_page(page);
 					pages[prev_cur_page] = NULL;
 				}
 			}
@@ -914,7 +905,7 @@ lock_retry_remap:
 			kunmap(page);
 			unlock_page(page);
 			if (cur_page != xpage)
-				page_cache_release(page);
+				put_page(page);
 			pages[cur_page] = NULL;
 		}
 	}
@@ -961,7 +952,7 @@ err_out:
 			kunmap(page);
 			unlock_page(page);
 			if (i != xpage)
-				page_cache_release(page);
+				put_page(page);
 		}
 	}
 	kfree(pages);
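
For context on why the conversion above is purely mechanical: before this change the kernel carried the PAGE_CACHE_* names only as aliases of the plain page macros, and page_cache_release() as an alias of put_page(), so swapping the names does not change behaviour. A rough sketch of those legacy definitions, as they used to live in include/linux/pagemap.h before their removal (reconstructed from memory for illustration, not part of this diff):

/* Legacy page-cache aliases (pre-4.6 kernels); illustrative sketch only. */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

Because the aliases always expanded to the same values, expressions such as initialized_size & ~PAGE_CACHE_MASK and initialized_size & ~PAGE_MASK are identical; the patch simply drops the redundant spellings (and the now-obsolete FIXME comments about PAGE_CACHE_SIZE != PAGE_SIZE).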
