Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c | 250
1 file changed, 150 insertions(+), 100 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b923d0cec61c..d9f856358704 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -198,9 +198,8 @@ static void __process_folios_contig(struct address_space *mapping,
 				    u64 end, unsigned long page_ops)
 {
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
-	pgoff_t start_index = start >> PAGE_SHIFT;
+	pgoff_t index = start >> PAGE_SHIFT;
 	pgoff_t end_index = end >> PAGE_SHIFT;
-	pgoff_t index = start_index;
 	struct folio_batch fbatch;
 	int i;
 
@@ -221,7 +220,7 @@ static void __process_folios_contig(struct address_space *mapping,
 	}
 }
 
-static noinline void __unlock_for_delalloc(const struct inode *inode,
+static noinline void unlock_delalloc_folio(const struct inode *inode,
 					   const struct folio *locked_folio,
 					   u64 start, u64 end)
 {
@@ -242,9 +241,8 @@ static noinline int lock_delalloc_folios(struct inode *inode,
 {
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 	struct address_space *mapping = inode->i_mapping;
-	pgoff_t start_index = start >> PAGE_SHIFT;
+	pgoff_t index = start >> PAGE_SHIFT;
 	pgoff_t end_index = end >> PAGE_SHIFT;
-	pgoff_t index = start_index;
 	u64 processed_end = start;
 	struct folio_batch fbatch;
 
@@ -288,8 +286,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
 out:
 	folio_batch_release(&fbatch);
 	if (processed_end > start)
-		__unlock_for_delalloc(inode, locked_folio, start,
-				      processed_end);
+		unlock_delalloc_folio(inode, locked_folio, start, processed_end);
 	return -EAGAIN;
 }
 
@@ -390,7 +387,7 @@ again:
 
 	unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 	if (!ret) {
-		__unlock_for_delalloc(inode, locked_folio, delalloc_start,
+		unlock_delalloc_folio(inode, locked_folio, delalloc_start,
 				      delalloc_end);
 		cond_resched();
 		goto again;
@@ -710,6 +707,7 @@ static void alloc_new_bio(struct btrfs_inode *inode,
 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
 			       bio_ctrl->end_io_func, NULL);
 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+	bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
 	bbio->inode = inode;
 	bbio->file_offset = file_offset;
 	bio_ctrl->bbio = bbio;
@@ -862,11 +860,6 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
 	return ret;
 }
 
-int set_page_extent_mapped(struct page *page)
-{
-	return set_folio_extent_mapped(page_folio(page));
-}
-
 int set_folio_extent_mapped(struct folio *folio)
 {
 	struct btrfs_fs_info *fs_info;
@@ -901,9 +894,9 @@ void clear_folio_extent_mapped(struct folio *folio)
 	folio_detach_private(folio);
 }
 
-static struct extent_map *__get_extent_map(struct inode *inode,
-					   struct folio *folio, u64 start,
-					   u64 len, struct extent_map **em_cached)
+static struct extent_map *get_extent_map(struct btrfs_inode *inode,
+					 struct folio *folio, u64 start,
+					 u64 len, struct extent_map **em_cached)
 {
 	struct extent_map *em;
 	struct extent_state *cached_state = NULL;
@@ -922,14 +915,14 @@ static struct extent_map *__get_extent_map(struct inode *inode,
 		*em_cached = NULL;
 	}
 
-	btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
-	em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
+	btrfs_lock_and_flush_ordered_range(inode, start, start + len - 1, &cached_state);
+	em = btrfs_get_extent(inode, folio, start, len);
 	if (!IS_ERR(em)) {
 		BUG_ON(*em_cached);
 		refcount_inc(&em->refs);
 		*em_cached = em;
 	}
 
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
+	unlock_extent(&inode->io_tree, start, start + len - 1, &cached_state);
 	return em;
 }
@@ -985,8 +978,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 			end_folio_read(folio, true, cur, iosize);
 			break;
 		}
-		em = __get_extent_map(inode, folio, cur, end - cur + 1,
-				      em_cached);
+		em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
 		if (IS_ERR(em)) {
 			end_folio_read(folio, false, cur, end + 1 - cur);
 			return PTR_ERR(em);
@@ -1142,14 +1134,19 @@ static bool find_next_delalloc_bitmap(struct folio *folio,
 }
 
 /*
- * helper for extent_writepage(), doing all of the delayed allocation setup.
+ * Do all of the delayed allocation setup.
  *
- * This returns 1 if btrfs_run_delalloc_range function did all the work required
- * to write the page (copy into inline extent).  In this case the IO has
- * been started and the page is already unlocked.
+ * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
+ * The @folio should no longer be touched (treat it as already unlocked).
  *
- * This returns 0 if all went well (page still locked)
- * This returns < 0 if there were errors (page still locked)
+ * Return 0 if there is still dirty block that needs to be submitted through
+ * extent_writepage_io().
+ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
+ * submitted, and @folio is still kept locked.
+ *
+ * Return <0 if there is any error hit.
+ * Any allocated ordered extent range covering this folio will be marked
+ * finished (IOERR), and @folio is still kept locked.
  */
 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 						 struct folio *folio,
@@ -1167,6 +1164,16 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 	 * last delalloc end.
 	 */
 	u64 last_delalloc_end = 0;
+	/*
+	 * The range end (exclusive) of the last successfully finished delalloc
+	 * range.
+	 * Any range covered by ordered extent must either be manually marked
+	 * finished (error handling), or has IO submitted (and finish the
+	 * ordered extent normally).
+	 *
+	 * This records the end of ordered extent cleanup if we hit an error.
+	 */
+	u64 last_finished_delalloc_end = page_start;
 	u64 delalloc_start = page_start;
 	u64 delalloc_end = page_end;
 	u64 delalloc_to_write = 0;
@@ -1235,11 +1242,28 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 			found_len = last_delalloc_end + 1 - found_start;
 
 		if (ret >= 0) {
+			/*
+			 * Some delalloc range may be created by previous folios.
+			 * Thus we still need to clean up this range during error
+			 * handling.
+			 */
+			last_finished_delalloc_end = found_start;
 			/* No errors hit so far, run the current delalloc range. */
 			ret = btrfs_run_delalloc_range(inode, folio,
 						       found_start,
 						       found_start + found_len - 1,
 						       wbc);
+			if (ret >= 0)
+				last_finished_delalloc_end = found_start + found_len;
+			if (unlikely(ret < 0))
+				btrfs_err_rl(fs_info,
+"failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
+					     btrfs_root_id(inode->root),
+					     btrfs_ino(inode),
+					     folio_pos(folio),
+					     fs_info->sectors_per_page,
+					     &bio_ctrl->submit_bitmap,
+					     found_start, found_len, ret);
 		} else {
 			/*
 			 * We've hit an error during previous delalloc range,
@@ -1247,7 +1271,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 			 */
 			unlock_extent(&inode->io_tree, found_start,
 				      found_start + found_len - 1, NULL);
-			__unlock_for_delalloc(&inode->vfs_inode, folio,
+			unlock_delalloc_folio(&inode->vfs_inode, folio,
 					      found_start,
 					      found_start + found_len - 1);
 		}
@@ -1274,8 +1298,22 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 		delalloc_start = found_start + found_len;
 	}
 
-	if (ret < 0)
+	/*
+	 * It's possible we had some ordered extents created before we hit
+	 * an error, cleanup non-async successfully created delalloc ranges.
+	 */
+	if (unlikely(ret < 0)) {
+		unsigned int bitmap_size = min(
+				(last_finished_delalloc_end - page_start) >>
+				fs_info->sectorsize_bits,
+				fs_info->sectors_per_page);
+
+		for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
+			btrfs_mark_ordered_io_finished(inode, folio,
+				page_start + (bit << fs_info->sectorsize_bits),
+				fs_info->sectorsize, false);
 		return ret;
+	}
 out:
 	if (last_delalloc_end)
 		delalloc_end = last_delalloc_end;
@@ -1335,7 +1373,7 @@ static int submit_one_sector(struct btrfs_inode *inode,
 
 	em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
 	if (IS_ERR(em))
-		return PTR_ERR_OR_ZERO(em);
+		return PTR_ERR(em);
 
 	extent_offset = filepos - em->start;
 	em_end = extent_map_end(em);
@@ -1391,6 +1429,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	unsigned long range_bitmap = 0;
 	bool submitted_io = false;
+	bool error = false;
 	const u64 folio_start = folio_pos(folio);
 	u64 cur;
 	int bit;
@@ -1433,11 +1472,26 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
 			break;
 		}
 		ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
-		if (ret < 0)
-			goto out;
+		if (unlikely(ret < 0)) {
+			/*
+			 * bio_ctrl may contain a bio crossing several folios.
+			 * Submit it immediately so that the bio has a chance
+			 * to finish normally, other than marked as error.
+			 */
+			submit_one_bio(bio_ctrl);
+			/*
+			 * Failed to grab the extent map which should be very rare.
+			 * Since there is no bio submitted to finish the ordered
+			 * extent, we have to manually finish this sector.
+			 */
+			btrfs_mark_ordered_io_finished(inode, folio, cur,
+						       fs_info->sectorsize, false);
+			error = true;
+			continue;
+		}
 		submitted_io = true;
 	}
-out:
+
 	/*
 	 * If we didn't submitted any sector (>= i_size), folio dirty get
 	 * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
@@ -1445,8 +1499,11 @@ out:
 	 *
 	 * Here we set writeback and clear for the range. If the full folio
	 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
+	 *
+	 * If we hit any error, the corresponding sector will still be dirty
+	 * thus no need to clear PAGECACHE_TAG_DIRTY.
 	 */
-	if (!submitted_io) {
+	if (!submitted_io && !error) {
 		btrfs_folio_set_writeback(fs_info, folio, start, len);
 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
 	}
@@ -1464,15 +1521,14 @@ out:
  */
 static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
 {
-	struct inode *inode = folio->mapping->host;
-	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
-	const u64 page_start = folio_pos(folio);
+	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	int ret;
 	size_t pg_offset;
-	loff_t i_size = i_size_read(inode);
+	loff_t i_size = i_size_read(&inode->vfs_inode);
 	unsigned long end_index = i_size >> PAGE_SHIFT;
 
-	trace_extent_writepage(folio, inode, bio_ctrl->wbc);
+	trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
 
 	WARN_ON(!folio_test_locked(folio));
 
@@ -1496,26 +1552,28 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
 	if (ret < 0)
 		goto done;
 
-	ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
+	ret = writepage_delalloc(inode, folio, bio_ctrl);
 	if (ret == 1)
 		return 0;
 	if (ret)
 		goto done;
 
-	ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
+	ret = extent_writepage_io(inode, folio, folio_pos(folio),
 				  PAGE_SIZE, bio_ctrl, i_size);
 	if (ret == 1)
 		return 0;
+	if (ret < 0)
+		btrfs_err_rl(fs_info,
+"failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
+			     btrfs_root_id(inode->root), btrfs_ino(inode),
+			     folio_pos(folio), fs_info->sectors_per_page,
+			     &bio_ctrl->submit_bitmap, ret);
 
 	bio_ctrl->wbc->nr_to_write--;
 
 done:
-	if (ret) {
-		btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
-					       page_start, PAGE_SIZE, !ret);
+	if (ret < 0)
 		mapping_set_error(folio->mapping, ret);
-	}
-
 	/*
 	 * Only unlock ranges that are submitted. As there can be some async
 	 * submitted ranges inside the folio.
@@ -1525,12 +1583,6 @@ done:
 	return ret;
 }
 
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
-{
-	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
-		       TASK_UNINTERRUPTIBLE);
-}
-
 /*
  * Lock extent buffer status and pages for writeback.
 *
@@ -1671,11 +1723,10 @@ static void end_bbio_meta_write(struct btrfs_bio *bbio)
 {
 	struct extent_buffer *eb = bbio->private;
 	struct btrfs_fs_info *fs_info = eb->fs_info;
-	bool uptodate = !bbio->bio.bi_status;
 	struct folio_iter fi;
 	u32 bio_offset = 0;
 
-	if (!uptodate)
+	if (bbio->bio.bi_status != BLK_STS_OK)
 		set_btree_ioerr(eb);
 
 	bio_for_each_folio_all(fi, &bbio->bio) {
@@ -2292,11 +2343,8 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
 		if (ret == 1)
 			goto next_page;
 
-		if (ret) {
-			btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
-						       cur, cur_len, !ret);
+		if (ret)
 			mapping_set_error(mapping, ret);
-		}
 		btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
 		if (ret < 0)
 			found_error = true;
@@ -2495,11 +2543,6 @@ next:
 	return try_release_extent_state(io_tree, folio);
 }
 
-static void __free_extent_buffer(struct extent_buffer *eb)
-{
-	kmem_cache_free(extent_buffer_cache, eb);
-}
-
 static int extent_buffer_under_io(const struct extent_buffer *eb)
 {
 	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
@@ -2580,8 +2623,8 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo
 	spin_unlock(&folio->mapping->i_private_lock);
 }
 
-/* Release all pages attached to the extent buffer */
-static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
+/* Release all folios attached to the extent buffer */
+static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
 {
 	ASSERT(!extent_buffer_under_io(eb));
 
@@ -2603,9 +2646,9 @@ static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
  */
 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 {
-	btrfs_release_extent_buffer_pages(eb);
+	btrfs_release_extent_buffer_folios(eb);
 	btrfs_leak_debug_del_eb(eb);
-	__free_extent_buffer(eb);
+	kmem_cache_free(extent_buffer_cache, eb);
 }
 
 static struct extent_buffer *
@@ -2703,7 +2746,7 @@ err:
 			folio_put(eb->folios[i]);
 		}
 	}
-	__free_extent_buffer(eb);
+	kmem_cache_free(extent_buffer_cache, eb);
 	return NULL;
 }
 
@@ -2830,13 +2873,12 @@ free_eb:
 }
 #endif
 
-static struct extent_buffer *grab_extent_buffer(
-		struct btrfs_fs_info *fs_info, struct page *page)
+static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
+						struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct extent_buffer *exists;
 
-	lockdep_assert_held(&page->mapping->i_private_lock);
+	lockdep_assert_held(&folio->mapping->i_private_lock);
 
 	/*
 	 * For subpage case, we completely rely on radix tree to ensure we
@@ -2851,7 +2893,7 @@ static struct extent_buffer *grab_extent_buffer(
 		return NULL;
 
 	/*
-	 * We could have already allocated an eb for this page and attached one
+	 * We could have already allocated an eb for this folio and attached one
 	 * so lets see if we can get a ref on the existing eb, and if we can we
 	 * know it's good and we can just return that one, else we know we can
 	 * just overwrite folio private.
@@ -2860,16 +2902,19 @@ static struct extent_buffer *grab_extent_buffer(
 	if (atomic_inc_not_zero(&exists->refs))
 		return exists;
 
-	WARN_ON(PageDirty(page));
+	WARN_ON(folio_test_dirty(folio));
 	folio_detach_private(folio);
 	return NULL;
 }
 
-static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+/*
+ * Validate alignment constraints of eb at logical address @start.
+ */
+static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
 {
 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
 		btrfs_err(fs_info, "bad tree block start %llu", start);
-		return -EINVAL;
+		return true;
 	}
 
 	if (fs_info->nodesize < PAGE_SIZE &&
@@ -2877,14 +2922,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
 		btrfs_err(fs_info,
 		"tree block crosses page boundary, start %llu nodesize %u",
 			  start, fs_info->nodesize);
-		return -EINVAL;
+		return true;
 	}
 	if (fs_info->nodesize >= PAGE_SIZE &&
 	    !PAGE_ALIGNED(start)) {
 		btrfs_err(fs_info,
 		"tree block is not page aligned, start %llu nodesize %u",
 			  start, fs_info->nodesize);
-		return -EINVAL;
+		return true;
 	}
 	if (!IS_ALIGNED(start, fs_info->nodesize) &&
 	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
@@ -2892,10 +2937,9 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
 			      start, fs_info->nodesize);
 	}
-	return 0;
+	return false;
 }
 
-
 /*
  * Return 0 if eb->folios[i] is attached to btree inode successfully.
  * Return >0 if there is already another extent buffer for the range,
@@ -2951,8 +2995,7 @@ finish:
 	} else if (existing_folio) {
 		struct extent_buffer *existing_eb;
 
-		existing_eb = grab_extent_buffer(fs_info,
-						 folio_page(existing_folio, 0));
+		existing_eb = grab_extent_buffer(fs_info, existing_folio);
 		if (existing_eb) {
 			/* The extent buffer still exists, we can use it directly. */
 			*found_eb_ret = existing_eb;
@@ -3149,7 +3192,7 @@ again:
 	 * live buffer and won't free them prematurely.
 	 */
 	for (int i = 0; i < num_folios; i++)
-		unlock_page(folio_page(eb->folios[i], 0));
+		folio_unlock(eb->folios[i]);
 	return eb;
 
 out:
@@ -3173,7 +3216,7 @@ out:
 	for (int i = 0; i < attached; i++) {
 		ASSERT(eb->folios[i]);
 		detach_extent_buffer_folio(eb, eb->folios[i]);
-		unlock_page(folio_page(eb->folios[i], 0));
+		folio_unlock(eb->folios[i]);
 		folio_put(eb->folios[i]);
 		eb->folios[i] = NULL;
 	}
@@ -3195,7 +3238,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 	struct extent_buffer *eb =
 			container_of(head, struct extent_buffer, rcu_head);
 
-	__free_extent_buffer(eb);
+	kmem_cache_free(extent_buffer_cache, eb);
 }
 
 static int release_extent_buffer(struct extent_buffer *eb)
@@ -3219,11 +3262,11 @@ static int release_extent_buffer(struct extent_buffer *eb)
 		}
 
 		btrfs_leak_debug_del_eb(eb);
-		/* Should be safe to release our pages at this point */
-		btrfs_release_extent_buffer_pages(eb);
+		/* Should be safe to release folios at this point. */
+		btrfs_release_extent_buffer_folios(eb);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
-			__free_extent_buffer(eb);
+			kmem_cache_free(extent_buffer_cache, eb);
 			return 1;
 		}
 #endif
@@ -3382,12 +3425,12 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
 		 * the above race.
 		 */
 		if (subpage)
-			lock_page(folio_page(eb->folios[0], 0));
+			folio_lock(eb->folios[0]);
 		for (int i = 0; i < num_folios; i++)
 			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
 					      eb->start, eb->len);
 		if (subpage)
-			unlock_page(folio_page(eb->folios[0], 0));
+			folio_unlock(eb->folios[0]);
 		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
 					 eb->len,
 					 eb->fs_info->dirty_metadata_batch);
@@ -3497,8 +3540,8 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
 	bio_put(&bbio->bio);
 }
 
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
-			     const struct btrfs_tree_parent_check *check)
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+				    const struct btrfs_tree_parent_check *check)
 {
 	struct btrfs_bio *bbio;
 	bool ret;
@@ -3516,7 +3559,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
 
 	/* Someone else is already reading the buffer, just wait for it. */
 	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
-		goto done;
+		return 0;
 
 	/*
 	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
@@ -3556,14 +3599,21 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
 		}
 	}
 	btrfs_submit_bbio(bbio, mirror_num);
+	return 0;
+}
 
-done:
-	if (wait == WAIT_COMPLETE) {
-		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
-		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
-			return -EIO;
-	}
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+			     const struct btrfs_tree_parent_check *check)
+{
+	int ret;
+
+	ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
+	if (ret < 0)
+		return ret;
+
+	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
+	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
+		return -EIO;
 	return 0;
 }
 
@@ -4294,7 +4344,7 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
 		return;
 	}
 
-	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
+	ret = read_extent_buffer_pages_nowait(eb, 0, &check);
 	if (ret < 0)
 		free_extent_buffer_stale(eb);
 	else
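
As a quick illustration of the last three hunks (this note and sketch are not part of the patch): read_extent_buffer_pages() loses its wait flag, and an explicit nowait variant is introduced, so callers choose the behaviour by function name instead of passing WAIT_NONE or WAIT_COMPLETE. A minimal before/after sketch of the calling convention, using only the signatures visible in the hunks above; eb, mirror_num and check stand in for a caller's existing extent buffer, mirror number and btrfs_tree_parent_check variables:

	/* Illustrative sketch only; signatures taken from the hunks above. */

	/* Before: one entry point, behaviour selected by the wait flag. */
	ret = read_extent_buffer_pages(eb, WAIT_NONE, mirror_num, &check);     /* submit and return */
	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, &check); /* submit and wait */

	/* After: two entry points; the waiting variant is built on the nowait one. */
	ret = read_extent_buffer_pages_nowait(eb, mirror_num, &check); /* submit and return */
	ret = read_extent_buffer_pages(eb, mirror_num, &check);        /* submit, then wait on EXTENT_BUFFER_READING */

The readahead path (btrfs_readahead_tree_block) accordingly switches to read_extent_buffer_pages_nowait(), since it never waited for completion in the first place.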
