| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-25 11:08:35 -0800 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-25 11:08:35 -0800 | 
| commit | 9b81d512a4a89dadfd3887d1b4443eeb2c20c573 (patch) | |
| tree | f43501b249d8935bd228d00a328d7f7ef37dfcf5 /drivers/nvme/host/pci.c | |
| parent | 4cf193b4b2363bfed0b4e040e61f20d78192e2e0 (diff) | |
| parent | dcd8376c369fa8fde8269e721b14f50475dd397b (diff) | |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull more block layer fixes from Jens Axboe:
 "I wasn't going to send off a new pull before next week, but the blk
  flush fix from Jan from the other day introduced a regression.  It's
  rare enough not to have hit during testing, since it requires both a
  device that rejects the first flush, and bad timing while it does
  that.  But since someone did hit it, let's get the revert into 4.4-rc3
  so we don't have a released rc with that known issue.
  Apart from that revert, three other fixes:
   - From Christoph, a fix for a missing unmap in NVMe request
     preparation.
   - An NVMe fix from Nishanth that fixes data corruption on powerpc.
   - Also from Christoph, fix a list_del() attempt on blk-mq that didn't
     have a matching list_add() at timer start"
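
The list_del() item refers to an invariant that is easy to illustrate outside the kernel: an entry may only be unlinked if it was previously linked. The sketch below is a minimal userspace illustration using a stripped-down intrusive list in the shape of the kernel's <linux/list.h>; it is not the actual blk-mq fix, and all names are stand-ins.

```c
/*
 * Userspace illustration of the invariant behind the blk_abort_request
 * fix: list_del() on an entry that was never list_add()ed chases
 * uninitialized prev/next pointers. Minimal intrusive list mirroring
 * the shape of <linux/list.h>; an illustration, not the actual fix.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	/* Undefined behaviour if entry was never added to a list. */
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

int main(void)
{
	struct list_head timeout_list, req;

	INIT_LIST_HEAD(&timeout_list);
	list_add(&req, &timeout_list);	/* legacy path: timer start links the request */
	list_del(&req);			/* ...so aborting it can safely unlink */

	/*
	 * A blk-mq request never goes through list_add() here; calling
	 * list_del() on it, as the pre-fix code did, would dereference
	 * whatever garbage sits in its next/prev fields.
	 */
	printf("unlinked cleanly\n");
	return 0;
}
```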
* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "blk-flush: Queue through IO scheduler when flush not required"
  block: fix blk_abort_request for blk-mq drivers
  nvme: add missing unmaps in nvme_queue_rq
  NVMe: default to 4k device page size
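
The "missing unmaps" commit is easiest to read in the diff below: every failure path taken after dma_map_sg() has succeeded must undo that mapping before jumping to error_cmd. As a self-contained C sketch of the same unwind discipline (the map/unmap helpers here are stand-ins for the kernel DMA API, not real calls):

```c
/*
 * Sketch of the unwind pattern the nvme_queue_rq fix enforces: a
 * resource mapped early in request setup must be released on every
 * later failure path, not just the final one. map_buffer/unmap_buffer
 * are illustrative stand-ins for dma_map_sg()/dma_unmap_sg().
 */
#include <stdio.h>
#include <stdlib.h>

static void *map_buffer(size_t len) { return malloc(len); }  /* stands in for dma_map_sg() */
static void unmap_buffer(void *p)   { free(p); }             /* stands in for dma_unmap_sg() */

static int setup_request(size_t len, int fail_integrity)
{
	void *data = map_buffer(len);
	if (!data)
		return -1;

	/* Later setup steps can still fail; each must undo the mapping. */
	if (fail_integrity) {
		unmap_buffer(data);	/* the missing unmap the patch adds */
		return -1;
	}

	/* ... submit the request, then tear down on completion ... */
	unmap_buffer(data);
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", setup_request(4096, 0));
	printf("error path: %d\n", setup_request(4096, 1));
	return 0;
}
```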
Diffstat (limited to 'drivers/nvme/host/pci.c')
| -rw-r--r-- | drivers/nvme/host/pci.c | 30 | 
1 file changed, 18 insertions(+), 12 deletions(-)
```diff
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 930042fa2d69..f3b53af789ef 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 
@@ -1728,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u32 aqa;
 	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
-	unsigned page_shift = PAGE_SHIFT;
+	/*
+	 * default to a 4K page size, with the intention to update this
+	 * path in the future to accomodate architectures with differing
+	 * kernel and IO page sizes.
+	 */
+	unsigned page_shift = 12;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
 		dev_err(dev->dev,
@@ -1739,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 				1 << page_shift);
 		return -ENODEV;
 	}
-	if (page_shift > dev_page_max) {
-		dev_info(dev->dev,
-				"Device maximum page size (%u) smaller than "
-				"host (%u); enabling work-around\n",
-				1 << dev_page_max, 1 << page_shift);
-		page_shift = dev_page_max;
-	}
 
 	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
 						NVME_CAP_NSSRC(cap) : 0;
```
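
For the page-size change, the arithmetic is compact: the controller's CAP register encodes its minimum memory page size as a power-of-two offset from 4 KiB, and the driver now always asks for 4 KiB (page_shift = 12) rather than the host's PAGE_SHIFT. A small userspace sketch of that check, with the macro mirroring the kernel's NVME_CAP_MPSMIN definition and a made-up cap value:

```c
/*
 * Sketch of the page-size check after the "default to 4k" change:
 * the driver always programs a 4 KiB memory page size and rejects
 * only devices whose CAP.MPSMIN exceeds that. The macro mirrors the
 * kernel's definition; the sample cap value is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)	/* CAP bits 51:48 */

int main(void)
{
	uint64_t cap = (uint64_t)0 << 48;	/* MPSMIN = 0 -> 4 KiB device minimum */
	unsigned page_shift = 12;		/* fixed 4 KiB, was PAGE_SHIFT */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;

	if (page_shift < dev_page_min) {
		fprintf(stderr, "Minimum device page size (%u) too large for host (%u)\n",
			1u << dev_page_min, 1u << page_shift);
		return 1;
	}
	printf("using %u-byte pages (device minimum %u)\n",
	       1u << page_shift, 1u << dev_page_min);
	return 0;
}
```

On a 64K-page powerpc host the old code programmed MPS from PAGE_SHIFT, which is what the "data corruption on powerpc" bullet above refers to; pinning the shift to 12 sidesteps that until per-architecture IO page sizes are handled properly.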
