author    Michael Ellerman <mpe@ellerman.id.au>   2019-08-30 09:49:28 +1000
committer Michael Ellerman <mpe@ellerman.id.au>   2019-08-30 09:49:28 +1000
commit    07aa1e786d4fc93cf646797ce045510dd5e982ee (patch)
tree      683ec43bbae7c784b37632b2311c46292c68b5fa
parent    bc605cd79edb68131d3be5b00b949aa312277d39 (diff)
parent    5cbdaeefb655072d304744812708b3f3a31c6b51 (diff)
Merge branch 'topic/mem-encrypt' into next
This branch has some cross-arch patches that are a prerequisite for the SVM work. They're in a topic branch in case any of the other arch maintainers want to merge them to resolve conflicts.
-rw-r--r--  arch/Kconfig                         |  3
-rw-r--r--  arch/s390/Kconfig                    |  4
-rw-r--r--  arch/s390/include/asm/mem_encrypt.h  |  5
-rw-r--r--  arch/s390/mm/init.c                  |  7
-rw-r--r--  arch/x86/Kconfig                     |  4
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h   | 10
-rw-r--r--  arch/x86/kernel/crash_dump_64.c      |  5
-rw-r--r--  arch/x86/mm/mem_encrypt.c            |  2
-rw-r--r--  fs/proc/vmcore.c                     |  8
-rw-r--r--  include/linux/crash_dump.h           | 14
-rw-r--r--  include/linux/mem_encrypt.h          | 15
-rw-r--r--  kernel/dma/mapping.c                 |  8
-rw-r--r--  kernel/dma/swiotlb.c                 |  3
13 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index a7b57dd42c26..89e2e3f64f79 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -925,6 +925,9 @@ config LOCK_EVENT_COUNTS
           the chance of application behavior change because of timing
           differences. The counts are reported via debugfs.
 
+config ARCH_HAS_MEM_ENCRYPT
+        bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a4ad2733eedf..f43319c44454 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-config ARCH_HAS_MEM_ENCRYPT
-        def_bool y
-
 config MMU
         def_bool y
@@ -68,6 +65,7 @@ config S390
         select ARCH_HAS_GCOV_PROFILE_ALL
         select ARCH_HAS_GIGANTIC_PAGE
         select ARCH_HAS_KCOV
+        select ARCH_HAS_MEM_ENCRYPT
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_SET_MEMORY
         select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 3eb018508190..2542cbf7e2d1 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,10 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask     0ULL
-
-static inline bool sme_active(void) { return false; }
-extern bool sev_active(void);
+static inline bool mem_encrypt_active(void) { return false; }
 
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 20340a03ad90..a124f19f7b3c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -156,14 +156,9 @@ int set_memory_decrypted(unsigned long addr, int numpages)
 }
 
 /* are we a protected virtualization guest? */
-bool sev_active(void)
-{
-        return is_prot_virt_guest();
-}
-
 bool force_dma_unencrypted(struct device *dev)
 {
-        return sev_active();
+        return is_prot_virt_guest();
 }
 
 /* protected virtualization */
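
On s390, sev_active() only ever wrapped is_prot_virt_guest(), so folding it into force_dma_unencrypted() loses no information. The hook itself is consumed by the dma-direct allocation path; a simplified sketch of that pattern, with names and flow condensed rather than the exact upstream code:

    /* Simplified sketch: share a DMA buffer in cleartext when required. */
    static void *alloc_shared_dma_buffer(struct device *dev, size_t size)
    {
            unsigned int order = get_order(size);
            void *vaddr = (void *)__get_free_pages(GFP_KERNEL, order);

            if (vaddr && force_dma_unencrypted(dev))
                    /* Make the pages visible to the device/hypervisor. */
                    set_memory_decrypted((unsigned long)vaddr, 1 << order);
            return vaddr;
    }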
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 222855cc0158..06027809c599 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -68,6 +68,7 @@ config X86
         select ARCH_HAS_FORTIFY_SOURCE
         select ARCH_HAS_GCOV_PROFILE_ALL
         select ARCH_HAS_KCOV                    if X86_64
+        select ARCH_HAS_MEM_ENCRYPT
         select ARCH_HAS_MEMBARRIER_SYNC_CORE
         select ARCH_HAS_PMEM_API                if X86_64
         select ARCH_HAS_PTE_DEVMAP              if X86_64
@@ -1518,9 +1519,6 @@ config X86_CPA_STATISTICS
           helps to determine the effectiveness of preserving large and huge
           page mappings when mapping protections are changed.
 
-config ARCH_HAS_MEM_ENCRYPT
-        def_bool y
-
 config AMD_MEM_ENCRYPT
         bool "AMD Secure Memory Encryption (SME) support"
         depends on X86_64 && CPU_SUP_AMD
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 0c196c47d621..848ce43b9040 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+        return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+        return sme_me_mask;
+}
+
 #endif  /* __ASSEMBLY__ */
 
 #endif  /* __X86_MEM_ENCRYPT_H__ */
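
Moving mem_encrypt_active() and sme_get_me_mask() into the x86 header means they are only ever inlined against the real x86 sme_me_mask variable. A hypothetical use of the mask helper, tagging a physical address as encrypted before it is installed in a mapping:

    /* Hypothetical illustration; a no-op when SME/SEV is inactive. */
    static inline unsigned long phys_mark_encrypted(unsigned long paddr)
    {
            return paddr | sme_get_me_mask();
    }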
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 22369dd5de3b..045e82e8945b 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 {
         return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
 }
+
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+        return read_from_oldmem(buf, count, ppos, 0, sev_active());
+}
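
This new strong definition overrides the __weak default kept in fs/proc/vmcore.c (see below), so only x86 builds reference sev_active() — under SEV the old kernel's memory, including the ELF core header, is encrypted with the guest key and must be read accordingly. The underlying mechanism is the linker's weak-symbol rule; a generic sketch with hypothetical names and dummy bodies:

    /* In generic code (one translation unit): an overridable default. */
    ssize_t __weak arch_read_hdr(char *buf, size_t count)
    {
            return -ENOSYS;
    }

    /* In arch code (a separate translation unit): the non-weak
     * definition of the same symbol wins at link time. */
    ssize_t arch_read_hdr(char *buf, size_t count)
    {
            return 0;       /* arch-specific implementation goes here */
    }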
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fece30ca8b0c..9268c12458c8 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -344,13 +344,11 @@ bool sme_active(void)
 {
         return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
         return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL(sev_active);
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
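
Dropping the EXPORT_SYMBOL()s means modules can no longer call sme_active() or sev_active(); after this series, code outside the arch asks the coarser question through mem_encrypt_active(). A hypothetical module-side sketch, assuming sme_me_mask itself stays exported (as it is at this point in the tree):

    #include <linux/mem_encrypt.h>
    #include <linux/module.h>

    static int __init encdemo_init(void)
    {
            /* Inline helper: no export of sme_active()/sev_active() needed. */
            pr_info("memory encryption is %sactive\n",
                    mem_encrypt_active() ? "" : "in");
            return 0;
    }
    module_init(encdemo_init);
    MODULE_LICENSE("GPL");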
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7bcc92add72c..7b13988796e1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -104,9 +104,9 @@ static int pfn_is_ram(unsigned long pfn)
 }
 
 /* Reads a page from the oldmem device from given offset. */
-static ssize_t read_from_oldmem(char *buf, size_t count,
-                                u64 *ppos, int userbuf,
-                                bool encrypted)
+ssize_t read_from_oldmem(char *buf, size_t count,
+                         u64 *ppos, int userbuf,
+                         bool encrypted)
 {
         unsigned long pfn, offset;
         size_t nr_bytes;
@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
  */
 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-        return read_from_oldmem(buf, count, ppos, 0, sev_active());
+        return read_from_oldmem(buf, count, ppos, 0, false);
 }
 
 /*
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index f774c5eb9e3c..4664fc1871de 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
         return -EOPNOTSUPP;
 }
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(char *buf, size_t count,
+                         u64 *ppos, int userbuf,
+                         bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(char *buf, size_t count,
+                                       u64 *ppos, int userbuf,
+                                       bool encrypted)
+{
+        return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
 #endif /* LINUX_CRASHDUMP_H */
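
With this declaration and the !CONFIG_PROC_VMCORE stub in place, arch code such as the x86 elfcorehdr_read() above can call read_from_oldmem() unconditionally. A hypothetical caller, for illustration:

    #include <linux/crash_dump.h>

    /* Hypothetical: read the start of the old kernel's ELF core header. */
    static ssize_t peek_elfcorehdr(char *buf, size_t len)
    {
            u64 pos = elfcorehdr_addr;

            /* Yields -EOPNOTSUPP when CONFIG_PROC_VMCORE is disabled. */
            return read_from_oldmem(buf, len, &pos, 0, false);
    }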
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 470bd53a89df..5c4a18a91f89 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -18,23 +18,10 @@
 
 #else   /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask     0ULL
-
-static inline bool sme_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 
 #endif  /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-        return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-        return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
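
For reference, the macros named in that comment are defined just below this hunk in the same header — __sme_set() ORs the encryption mask into a value, __sme_clr() strips it (shown here modulo whitespace; the wrapper function is a hypothetical usage example):

    #define __sme_set(x)            ((x) | sme_me_mask)
    #define __sme_clr(x)            ((x) & ~sme_me_mask)

    /* Hypothetical: stamp the encryption bit into a page-table physical address. */
    static inline u64 pte_phys_encrypted(u64 phys)
    {
            return __sme_set(phys);
    }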
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 1f628e7ac709..61eeefbfcb36 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -291,12 +291,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
 int dma_supported(struct device *dev, u64 mask)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -327,7 +321,6 @@ int dma_set_mask(struct device *dev, u64 mask)
                 return -EIO;
 
         arch_dma_set_mask(dev, mask);
-        dma_check_mask(dev, mask);
         *dev->dma_mask = mask;
         return 0;
 }
@@ -345,7 +338,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
         if (!dma_supported(dev, mask))
                 return -EIO;
 
-        dma_check_mask(dev, mask);
         dev->coherent_dma_mask = mask;
         return 0;
 }
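
The deleted dma_check_mask() logic is worth unpacking: sme_get_me_mask() is a single bit (the C-bit), so ((u64)sme_get_me_mask() << 1) - 1 expands it into an all-ones value covering every address bit up to and including that bit; any device whose DMA mask is smaller cannot address encrypted memory directly and would need SWIOTLB bouncing. A standalone worked example (bit position 47 is illustrative; hardware reports the real position via CPUID):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t sme_me_mask = 1ULL << 47;         /* example C-bit position */
            uint64_t need = (sme_me_mask << 1) - 1;    /* 0x0000ffffffffffff */
            uint64_t dev_mask = (1ULL << 32) - 1;      /* a 32-bit DMA-capable device */

            if (dev_mask < need)
                    printf("device would require DMA bounce buffers\n");
            return 0;
    }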
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9de232229063..f29caad71e13 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -461,8 +461,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                 panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
         if (mem_encrypt_active())
-                pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-                             sme_active() ? "SME" : "SEV");
+                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
         mask = dma_get_seg_boundary(hwdev);
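
The message loses the %s/sme_active() distinction because sme_active() is now x86-private; generic code keeps only the single predicate, which is true for both SME and SEV since sme_me_mask is nonzero in either mode:

    /*
     * What generic code can observe after this series:
     *
     *   mode         sme_me_mask    mem_encrypt_active()
     *   disabled     0              false
     *   SME (host)   C-bit set      true
     *   SEV (guest)  C-bit set      true
     *
     * Telling SME and SEV apart now requires the arch-private
     * sme_active()/sev_active(), which swiotlb may no longer call.
     */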