author    | Alistair Popple <apopple@nvidia.com>      | 2025-06-19 18:57:55 +1000
committer | Andrew Morton <akpm@linux-foundation.org> | 2025-07-09 22:42:16 -0700
commit    | 79065255abc4b0fae843f539f7f5f0fe9fcac1e6 (patch)
tree      | 683d8a19b4cbec40865ed8fb77940432deaca902
parent    | 6b4a80e424cd2f99ca7262a8c0c725ec82e57b8a (diff)
mm: remove remaining uses of PFN_DEV
PFN_DEV was used by callers of dax_direct_access() to figure out, via
pfn_t_has_page(), whether the returned PFN has an associated page.  However,
all DAX PFNs now require an associated ZONE_DEVICE page, so callers can
assume a page exists.
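
To illustrate what this means for a caller (a minimal sketch, not part of this
patch: the helper name is made up, while dax_direct_access(), DAX_ACCESS and
pfn_t_to_page() are the existing kernel interfaces around this series):

#include <linux/dax.h>
#include <linux/err.h>
#include <linux/pfn_t.h>

/* Hypothetical caller: map one page of a DAX device and return its page. */
static struct page *example_dax_page(struct dax_device *dax_dev, pgoff_t pgoff)
{
        void *kaddr;
        pfn_t pfn;
        long nr;

        nr = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, &pfn);
        if (nr < 0)
                return ERR_PTR(nr);

        /*
         * Previously a caller would bail out here when !pfn_t_has_page(pfn).
         * Every DAX PFN is now backed by a ZONE_DEVICE page, so the
         * conversion below is always valid.
         */
        return pfn_t_to_page(pfn);
}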
Other users of PFN_DEV set it before calling vmf_insert_mixed().  This is
unnecessary because the flag is no longer checked; vmf_insert_mixed() instead
relies on pfn_valid() to determine whether there is an associated page.
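
For such callers the change is mechanical (again a sketch, not from this
patch: the fault handler name is invented, while vmf_insert_mixed() and
pfn_to_pfn_t() are real interfaces):

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical fault handler inserting a raw PFN into a VM_MIXEDMAP vma. */
static vm_fault_t example_fault(struct vm_fault *vmf, unsigned long pfn)
{
        /*
         * pfn_to_pfn_t(pfn) is __pfn_to_pfn_t(pfn, 0): no PFN_DEV needed,
         * since whether a struct page exists is derived from pfn_valid().
         */
        return vmf_insert_mixed(vmf->vma, vmf->address, pfn_to_pfn_t(pfn));
}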
Link: https://lkml.kernel.org/r/74b293aebc21b941090bc3e7aeafa91b57c821a5.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- | drivers/gpu/drm/gma500/fbdev.c     | 2
-rw-r--r-- | drivers/gpu/drm/omapdrm/omap_gem.c | 5
-rw-r--r-- | drivers/s390/block/dcssblk.c       | 3
-rw-r--r-- | drivers/vfio/pci/vfio_pci_core.c   | 6
-rw-r--r-- | fs/cramfs/inode.c                  | 2
-rw-r--r-- | mm/memory.c                        | 2
6 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 8edefea2ef59..109efdc96ac5 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -33,7 +33,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         for (i = 0; i < page_num; ++i) {
-                err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+                err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
                 if (unlikely(err & VM_FAULT_ERROR))
                         break;
                 address += PAGE_SIZE;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b9c67e4ca360..9df05b2b7ba0 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -371,8 +371,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
         VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                         pfn, pfn << PAGE_SHIFT);
 
-        return vmf_insert_mixed(vma, vmf->address,
-                        __pfn_to_pfn_t(pfn, PFN_DEV));
+        return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -468,7 +467,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
 
         for (i = n; i > 0; i--) {
                 ret = vmf_insert_mixed(vma,
-                                vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+                                vaddr, __pfn_to_pfn_t(pfn, 0));
                 if (ret & VM_FAULT_ERROR)
                         break;
                 pfn += priv->usergart[fmt].stride_pfn;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cdc7b2f16b88..249ae403f698 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -923,8 +923,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
         if (kaddr)
                 *kaddr = __va(dev_info->start + offset);
         if (pfn)
-                *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
-                                PFN_DEV);
+                *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
 
         return (dev_sz - offset) / PAGE_SIZE;
 }
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 6328c3a05bcd..3f2ad5fb4c17 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1669,14 +1669,12 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
                 break;
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
         case PMD_ORDER:
-                ret = vmf_insert_pfn_pmd(vmf,
-                                         __pfn_to_pfn_t(pfn, PFN_DEV), false);
+                ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, 0), false);
                 break;
 #endif
 #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
         case PUD_ORDER:
-                ret = vmf_insert_pfn_pud(vmf,
-                                         __pfn_to_pfn_t(pfn, PFN_DEV), false);
+                ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn, 0), false);
                 break;
 #endif
         default:
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b84d1747a020..820a664cfec7 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -412,7 +412,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
                 for (i = 0; i < pages && !ret; i++) {
                         vm_fault_t vmf;
                         unsigned long off = i * PAGE_SIZE;
-                        pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
+                        pfn_t pfn = phys_to_pfn_t(address + off, 0);
                         vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
                         if (vmf & VM_FAULT_ERROR)
                                 ret = vm_fault_to_errno(vmf, 0);
diff --git a/mm/memory.c b/mm/memory.c
index 833426fa5fe0..ea1388d2a87a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2557,7 +2557,7 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
         pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-        return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+        return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
                         false);
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);