Diffstat (limited to 'arch/loongarch'): 98 files changed, 1747 insertions, 1150 deletions
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 1a2cf012b8f2f..f0abc38c40ac9 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -15,7 +15,6 @@ config LOONGARCH
 	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_CPU_FINALIZE_INIT
-	select ARCH_HAS_CRC32
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_FAST_MULTIPLIER
@@ -25,7 +24,6 @@ config LOONGARCH
 	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PREEMPT_LAZY
-	select ARCH_HAS_PTE_DEVMAP
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SET_DIRECT_MAP
@@ -69,6 +67,7 @@ config LOONGARCH
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_LTO_CLANG
 	select ARCH_SUPPORTS_LTO_CLANG_THIN
+	select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_RT
 	select ARCH_USE_BUILTIN_BSWAP
@@ -119,6 +118,7 @@ config LOONGARCH
 	select HAVE_ARCH_KASAN
 	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB if PERF_EVENTS
+	select HAVE_ARCH_KSTACK_ERASE
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP
@@ -142,7 +142,6 @@ config LOONGARCH
 	select HAVE_EXIT_THREAD
 	select HAVE_GUP_FAST
 	select HAVE_FTRACE_GRAPH_FUNC
-	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_FUNCTION_GRAPH_FREGS
@@ -187,6 +186,7 @@ config LOONGARCH
 	select MODULES_USE_ELF_RELA if MODULES
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
 	select NEED_PER_CPU_PAGE_FIRST_CHUNK
+	select NUMA_MEMBLKS if NUMA
 	select OF
 	select OF_EARLY_FLATTREE
 	select PCI
@@ -456,6 +456,15 @@ config SCHED_SMT
 	  Improves scheduler's performance when there are multiple
 	  threads in one physical core.
 
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places.
+
 config SMP
 	bool "Multi-Processing support"
 	help
@@ -485,10 +494,10 @@ config HOTPLUG_CPU
 	  Say N if you want to disable CPU hotplug.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-256)"
-	range 2 256
+	int "Maximum number of CPUs (2-2048)"
+	range 2 2048
+	default "2048"
 	depends on SMP
-	default "64"
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.
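The NR_CPUS and SCHED_MC changes above are matched by hunks elsewhere in this diff; the C excerpt below is only a reader's sketch that collects those related pieces (from the asm/acpi.h, asm/loongarch.h, asm/smp.h and asm/topology.h hunks further down) to show why the limits line up — it is not an addition to the patch itself.

/*
 * Collected from other hunks in this diff, not new code: the 2048-CPU limit
 * needs a matching ACPI core-PIC table size and a wider CPUID core-id field,
 * and CONFIG_SCHED_MC is backed by a new last-level-cache sharing mask.
 */
#define MAX_CORE_PIC		2048		/* asm/acpi.h: raised from 256 to match NR_CPUS */
#define CSR_CPUID_COREID_WIDTH	11		/* asm/loongarch.h: was 9; 2^11 = 2048 core IDs */
#define CSR_CPUID_COREID	_ULCAST_(0x7ff)	/* asm/loongarch.h: was 0x1ff */

extern cpumask_t cpu_llc_shared_map[];		/* asm/smp.h: new LLC-sharing cpumask per CPU */

/* asm/topology.h: CONFIG_SCHED_MC groups CPUs that share the last-level cache */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_llc_shared_map[cpu];
}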
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 0304eabbe6066..b0703a4e02a25 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -181,11 +181,14 @@ vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@ install: - $(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/$(image-name-y)-$(KERNELRELEASE) - $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE) - $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE) + $(call cmd,install) define archhelp - echo ' install - install kernel into $(INSTALL_PATH)' + echo ' vmlinux.elf - Uncompressed ELF kernel image (arch/loongarch/boot/vmlinux.elf)' + echo ' vmlinux.efi - Uncompressed EFI kernel image (arch/loongarch/boot/vmlinux.efi)' + echo ' vmlinuz.efi - GZIP/ZSTD-compressed EFI kernel image (arch/loongarch/boot/vmlinuz.efi)' + echo ' Default when CONFIG_EFI_ZBOOT=y' + echo ' install - Install kernel using (your) ~/bin/$(INSTALLKERNEL) or' + echo ' (distribution) /sbin/$(INSTALLKERNEL) or install.sh to $$(INSTALL_PATH)' echo endef diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts index a34734a6c3ce8..018ed904352a7 100644 --- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts @@ -41,6 +41,15 @@ }; }; +&apbdma3 { + status = "okay"; +}; + +&mmc0 { + status = "okay"; + bus-width = <4>; +}; + &gmac0 { status = "okay"; diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi index 3b38ff8853a7d..588ebc3bded40 100644 --- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi @@ -104,7 +104,7 @@ status = "disabled"; }; - dma-controller@1fe10c20 { + apbdma2: dma-controller@1fe10c20 { compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma"; reg = <0 0x1fe10c20 0 0x8>; interrupt-parent = <&eiointc>; @@ -114,7 +114,7 @@ status = "disabled"; }; - dma-controller@1fe10c30 { + apbdma3: dma-controller@1fe10c30 { compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma"; reg = <0 0x1fe10c30 0 0x8>; interrupt-parent = <&eiointc>; @@ -169,6 +169,166 @@ interrupts = <3>; }; + pwm@1ff5c000 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c000 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c010 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c010 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c020 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c020 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c030 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c030 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c040 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c040 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 
IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c050 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c050 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c060 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c060 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c070 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c070 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c080 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c080 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c090 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c090 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0a0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0a0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0b0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0b0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0c0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0c0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0d0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0d0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0e0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0e0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0f0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0f0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + gmac0: ethernet@1f020000 { compatible = "snps,dwmac-3.70a"; reg = <0x0 0x1f020000 0x0 0x10000>; @@ -277,6 +437,30 @@ status = "disabled"; }; + mmc0: mmc@1ff64000 { + compatible = "loongson,ls2k0500-mmc"; + reg = <0 0x1ff64000 0 0x2000>, + <0 0x1fe10100 0 0x4>; + interrupt-parent = <&eiointc>; + interrupts = <57>; + dmas = <&apbdma3 0>; + dma-names = "rx-tx"; + clocks = <&clk LOONGSON2_APB_CLK>; + status = "disabled"; 
+ }; + + mmc@1ff66000 { + compatible = "loongson,ls2k0500-mmc"; + reg = <0 0x1ff66000 0 0x2000>, + <0 0x1fe10100 0 0x4>; + interrupt-parent = <&eiointc>; + interrupts = <58>; + dmas = <&apbdma2 0>; + dma-names = "rx-tx"; + clocks = <&clk LOONGSON2_APB_CLK>; + status = "disabled"; + }; + pmc: power-management@1ff6c000 { compatible = "loongson,ls2k0500-pmc", "syscon"; reg = <0x0 0x1ff6c000 0x0 0x58>; diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts index 3514ea78f5256..d9a452ada5d77 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts @@ -5,6 +5,7 @@ /dts-v1/; +#include "dt-bindings/thermal/thermal.h" #include "loongson-2k1000.dtsi" / { @@ -38,6 +39,26 @@ linux,cma-default; }; }; + + fan0: pwm-fan { + compatible = "pwm-fan"; + cooling-levels = <255 153 85 25>; + pwms = <&pwm1 0 100000 0>; + #cooling-cells = <2>; + }; +}; + +&apbdma1 { + status = "okay"; +}; + +&mmc { + status = "okay"; + + pinctrl-0 = <&sdio_pins_default>; + pinctrl-names = "default"; + bus-width = <4>; + cd-gpios = <&gpio0 22 GPIO_ACTIVE_LOW>; }; &gmac0 { @@ -92,6 +113,22 @@ #size-cells = <0>; }; +&pwm1 { + status = "okay"; + + pinctrl-0 = <&pwm1_pins_default>; + pinctrl-names = "default"; +}; + +&cpu_thermal { + cooling-maps { + map0 { + trip = <&cpu_alert>; + cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; +}; + &ehci0 { status = "okay"; }; diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi index 8dff2aa524171..d8e01e2534dde 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi @@ -68,7 +68,7 @@ }; thermal-zones { - cpu-thermal { + cpu_thermal: cpu-thermal { polling-delay-passive = <1000>; polling-delay = <5000>; thermal-sensors = <&tsensor 0>; @@ -187,14 +187,14 @@ <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, - <>, - <26 IRQ_TYPE_LEVEL_HIGH>, + <0 IRQ_TYPE_NONE>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, + <26 IRQ_TYPE_NONE>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, @@ -209,13 +209,13 @@ <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, - <>, + <0 IRQ_TYPE_NONE>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, - <>, - <>, + <0 IRQ_TYPE_NONE>, + <0 IRQ_TYPE_NONE>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, @@ -256,7 +256,7 @@ status = "disabled"; }; - dma-controller@1fe00c10 { + apbdma1: dma-controller@1fe00c10 { compatible = "loongson,ls2k1000-apbdma"; reg = <0x0 0x1fe00c10 0x0 0x8>; interrupt-parent = <&liointc1>; @@ -322,6 +322,46 @@ status = "disabled"; }; + pwm@1fe22000 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22000 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm1: pwm@1fe22010 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22010 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1fe22020 { + compatible = "loongson,ls2k1000-pwm", 
"loongson,ls7a-pwm"; + reg = <0x0 0x1fe22020 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1fe22030 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22030 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + pmc: power-management@1fe27000 { compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon"; reg = <0x0 0x1fe27000 0x0 0x58>; @@ -365,6 +405,18 @@ status = "disabled"; }; + mmc: mmc@1fe2c000 { + compatible = "loongson,ls2k1000-mmc"; + reg = <0 0x1fe2c000 0 0x68>, + <0 0x1fe00438 0 0x8>; + interrupt-parent = <&liointc0>; + interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + dmas = <&apbdma1 0>; + dma-names = "rx-tx"; + status = "disabled"; + }; + spi0: spi@1fff0220 { compatible = "loongson,ls2k1000-spi"; reg = <0x0 0x1fff0220 0x0 0x10>; diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts index ea9e6985d0e9f..3c6b122203865 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts @@ -39,6 +39,16 @@ }; }; +&emmc { + status = "okay"; + + bus-width = <8>; + cap-mmc-highspeed; + mmc-hs200-1_8v; + no-sd; + no-sdio; +}; + &sata { status = "okay"; }; diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi index b4ff55a33e90b..00cc485b753b1 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi @@ -165,6 +165,66 @@ interrupt-parent = <&eiointc>; }; + pwm@100a0000 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0000 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0100 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0100 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0200 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0200 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0300 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0300 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0400 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0400 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0500 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0500 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <39 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + rtc0: rtc@100d0100 { compatible = "loongson,ls2k2000-rtc", "loongson,ls7a-rtc"; reg = <0x0 0x100d0100 0x0 0x100>; @@ 
-199,6 +259,24 @@ status = "disabled"; }; + emmc: mmc@79990000 { + compatible = "loongson,ls2k2000-mmc"; + reg = <0x0 0x79990000 0x0 0x1000>; + interrupt-parent = <&pic>; + interrupts = <51 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_EMMC_CLK>; + status = "disabled"; + }; + + mmc@79991000 { + compatible = "loongson,ls2k2000-mmc"; + reg = <0x0 0x79991000 0x0 0x1000>; + interrupt-parent = <&pic>; + interrupts = <50 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_EMMC_CLK>; + status = "disabled"; + }; + pcie@1a000000 { compatible = "loongson,ls2k-pci"; reg = <0x0 0x1a000000 0x0 0x02000000>, diff --git a/arch/loongarch/boot/install.sh b/arch/loongarch/boot/install.sh new file mode 100755 index 0000000000000..daac197d33151 --- /dev/null +++ b/arch/loongarch/boot/install.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# Adapted from code in arch/i386/boot/install.sh by Russell King +# +# "make install" script for the LoongArch Linux port +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) + +set -e + +case "${2##*/}" in +vmlinux.elf) + echo "Installing uncompressed vmlinux.elf kernel" + base=vmlinux + ;; +vmlinux.efi) + echo "Installing uncompressed vmlinux.efi kernel" + base=vmlinux + ;; +vmlinuz.efi) + echo "Installing gzip/zstd compressed vmlinuz.efi kernel" + base=vmlinuz + ;; +*) + echo "Warning: Unexpected kernel type" + exit 1 + ;; +esac + +if [ -f $4/$base-$1 ]; then + mv $4/$base-$1 $4/$base-$1.old +fi +cat $2 > $4/$base-$1 + +# Install system map file +if [ -f $4/System.map-$1 ]; then + mv $4/System.map-$1 $4/System.map-$1.old +fi +cp $3 $4/System.map-$1 + +# Install kernel config file +if [ -f $4/config-$1 ]; then + mv $4/config-$1 $4/config-$1.old +fi +cp .config $4/config-$1 diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 90f21dfe22b13..34eaee0384c92 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -225,7 +225,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m @@ -784,8 +783,23 @@ CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_PATCH_LOADER=y CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK_LIB=y +CONFIG_SND_HDA_CODEC_ALC260=y +CONFIG_SND_HDA_CODEC_ALC262=y +CONFIG_SND_HDA_CODEC_ALC268=y +CONFIG_SND_HDA_CODEC_ALC269=y +CONFIG_SND_HDA_CODEC_ALC662=y +CONFIG_SND_HDA_CODEC_ALC680=y +CONFIG_SND_HDA_CODEC_ALC861=y +CONFIG_SND_HDA_CODEC_ALC861VD=y +CONFIG_SND_HDA_CODEC_ALC880=y +CONFIG_SND_HDA_CODEC_ALC882=y CONFIG_SND_HDA_CODEC_SIGMATEL=y CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y +CONFIG_SND_HDA_CODEC_HDMI_INTEL=y +CONFIG_SND_HDA_CODEC_HDMI_ATI=y +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m @@ -1026,7 +1040,7 @@ CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 
+CONFIG_CRYPTO_SELFTESTS=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_ANUBIS=m diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index 80ddb5edb8455..b04d2cef935f6 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -10,5 +10,4 @@ generic-y += user.h generic-y += ioctl.h generic-y += mmzone.h generic-y += statfs.h -generic-y += param.h generic-y += text-patching.h diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h index 313f66f7913af..7376840fa9f78 100644 --- a/arch/loongarch/include/asm/acpi.h +++ b/arch/loongarch/include/asm/acpi.h @@ -33,7 +33,7 @@ static inline bool acpi_has_cpu_in_madt(void) return true; } -#define MAX_CORE_PIC 256 +#define MAX_CORE_PIC 2048 extern struct list_head acpi_wakeup_device_list; extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC]; diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index fe198b473f849..e739dbc6329dc 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -18,12 +18,12 @@ /* * This gives the physical RAM offset. */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifndef PHYS_OFFSET #define PHYS_OFFSET _UL(0) #endif extern unsigned long vm_map_base; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #ifndef IO_BASE #define IO_BASE CSR_DMW0_BASE @@ -66,7 +66,7 @@ extern unsigned long vm_map_base; #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) #endif -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define _ATYPE_ #define _ATYPE32_ #define _ATYPE64_ @@ -85,7 +85,7 @@ extern unsigned long vm_map_base; /* * 32/64-bit LoongArch address spaces */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define _ACAST32_ #define _ACAST64_ #else diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h index ff3d10ac393f2..7dc29bd9b2f0e 100644 --- a/arch/loongarch/include/asm/alternative-asm.h +++ b/arch/loongarch/include/asm/alternative-asm.h @@ -2,7 +2,7 @@ #ifndef _ASM_ALTERNATIVE_ASM_H #define _ASM_ALTERNATIVE_ASM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/asm.h> @@ -77,6 +77,6 @@ .previous .endm -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_ALTERNATIVE_ASM_H */ diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h index cee7b29785ab2..b5bae21fb3c85 100644 --- a/arch/loongarch/include/asm/alternative.h +++ b/arch/loongarch/include/asm/alternative.h @@ -2,7 +2,7 @@ #ifndef _ASM_ALTERNATIVE_H #define _ASM_ALTERNATIVE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/stddef.h> @@ -106,6 +106,6 @@ extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_ALTERNATIVE_H */ diff --git a/arch/loongarch/include/asm/asm-extable.h b/arch/loongarch/include/asm/asm-extable.h index df05005f2b80a..d60bdf2e63775 100644 --- a/arch/loongarch/include/asm/asm-extable.h +++ b/arch/loongarch/include/asm/asm-extable.h @@ -7,7 +7,7 @@ #define EX_TYPE_UACCESS_ERR_ZERO 2 #define EX_TYPE_BPF 3 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ .pushsection __ex_table, "a"; \ @@ -22,7 +22,7 @@ 
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) .endm -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #include <linux/bits.h> #include <linux/stringify.h> @@ -60,6 +60,6 @@ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h index 51f224bcfc654..704066b4f7368 100644 --- a/arch/loongarch/include/asm/asm-prototypes.h +++ b/arch/loongarch/include/asm/asm-prototypes.h @@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b); __int128_t __ashrti3(__int128_t a, int b); __int128_t __lshrti3(__int128_t a, int b); #endif + +asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev, + struct pt_regs *regs); + +asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev, + struct pt_regs *regs, + int (*fn)(void *), + void *fn_arg); diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h index f591b3245def6..f018d26fc995a 100644 --- a/arch/loongarch/include/asm/asm.h +++ b/arch/loongarch/include/asm/asm.h @@ -110,7 +110,7 @@ #define LONG_SRA srai.w #define LONG_SRAV sra.w -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define LONG .word #endif #define LONGSIZE 4 @@ -131,7 +131,7 @@ #define LONG_SRA srai.d #define LONG_SRAV sra.d -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define LONG .dword #endif #define LONGSIZE 8 @@ -158,7 +158,7 @@ #define PTR_SCALESHIFT 2 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define PTR .word #endif #define PTRSIZE 4 @@ -181,7 +181,7 @@ #define PTR_SCALESHIFT 3 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define PTR .dword #endif #define PTRSIZE 8 diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index 98cf4d7b4b0a0..dfb982fe87019 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -46,7 +46,7 @@ #define PRID_PRODUCT_MASK 0x0fff -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) enum cpu_type_enum { CPU_UNKNOWN, @@ -55,7 +55,7 @@ enum cpu_type_enum { CPU_LAST }; -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ /* * ISA Level encodings diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h index 0fe2a098ded96..099132980dc9d 100644 --- a/arch/loongarch/include/asm/entry-common.h +++ b/arch/loongarch/include/asm/entry-common.h @@ -2,12 +2,6 @@ #ifndef ARCH_LOONGARCH_ENTRY_COMMON_H #define ARCH_LOONGARCH_ENTRY_COMMON_H -#include <linux/sched.h> -#include <linux/processor.h> - -static inline bool on_thread_stack(void) -{ - return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); -} +#include <asm/stacktrace.h> /* For on_thread_stack() */ #endif diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h index 6e0a99763a9a7..f4caaf764f9ee 100644 --- a/arch/loongarch/include/asm/ftrace.h +++ b/arch/loongarch/include/asm/ftrace.h @@ -14,7 +14,7 @@ #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifndef CONFIG_DYNAMIC_FTRACE @@ -84,7 +84,7 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/loongarch/include/asm/gpr-num.h b/arch/loongarch/include/asm/gpr-num.h index 
996038da806d1..af95b941f48bd 100644 --- a/arch/loongarch/include/asm/gpr-num.h +++ b/arch/loongarch/include/asm/gpr-num.h @@ -2,7 +2,7 @@ #ifndef __ASM_GPR_NUM_H #define __ASM_GPR_NUM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .equ .L__gpr_num_zero, 0 .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 @@ -25,7 +25,7 @@ .equ .L__gpr_num_$s\num, 23 + \num .endr -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #define __DEFINE_ASM_GPR_NUMS \ " .equ .L__gpr_num_zero, 0\n" \ @@ -47,6 +47,6 @@ " .equ .L__gpr_num_$s\\num, 23 + \\num\n" \ " .endr\n" \ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_GPR_NUM_H */ diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index 4dc4b3e04225f..ab68b594f889d 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -10,20 +10,6 @@ uint64_t pmd_to_entrylo(unsigned long pmd_val); -#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, - unsigned long len) -{ - unsigned long task_size = STACK_TOP; - - if (len > task_size) - return -ENOMEM; - if (task_size - len < addr) - return -EINVAL; - return 0; -} - #define __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 3089785ca97e7..277d2140676b6 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -497,6 +497,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs); int larch_insn_read(void *addr, u32 *insnp); int larch_insn_write(void *addr, u32 insn); int larch_insn_patch_text(void *addr, u32 insn); +int larch_insn_text_copy(void *dst, void *src, size_t len); u32 larch_insn_gen_nop(void); u32 larch_insn_gen_b(unsigned long pc, unsigned long dest); @@ -510,6 +511,8 @@ u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj); u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); +u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); +u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); static inline bool signed_imm_check(long val, unsigned int bit) diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h index 319a8c616f1f5..620163628a7f6 100644 --- a/arch/loongarch/include/asm/irqflags.h +++ b/arch/loongarch/include/asm/irqflags.h @@ -5,7 +5,7 @@ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/compiler.h> #include <linux/stringify.h> @@ -14,40 +14,48 @@ static inline void arch_local_irq_enable(void) { u32 flags = CSR_CRMD_IE; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } static inline void arch_local_irq_disable(void) { u32 flags = 0; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" 
(CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } static inline unsigned long arch_local_irq_save(void) { u32 flags = 0; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); return flags; } static inline void arch_local_irq_restore(unsigned long flags) { + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } @@ -72,6 +80,6 @@ static inline int arch_irqs_disabled(void) return arch_irqs_disabled_flags(arch_local_save_flags()); } -#endif /* #ifndef __ASSEMBLY__ */ +#endif /* #ifndef __ASSEMBLER__ */ #endif /* _ASM_IRQFLAGS_H */ diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h index 8a924bd69d196..4000c7603d8e3 100644 --- a/arch/loongarch/include/asm/jump_label.h +++ b/arch/loongarch/include/asm/jump_label.h @@ -7,7 +7,7 @@ #ifndef __ASM_JUMP_LABEL_H #define __ASM_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -50,5 +50,5 @@ l_yes: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h index 7f52bd31b9d4f..62f139a9c87da 100644 --- a/arch/loongarch/include/asm/kasan.h +++ b/arch/loongarch/include/asm/kasan.h @@ -2,7 +2,7 @@ #ifndef __ASM_KASAN_H #define __ASM_KASAN_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/linkage.h> #include <linux/mmzone.h> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index f457c2662e2f2..0cecbd038bb3c 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -50,12 +50,6 @@ struct kvm_vm_stat { struct kvm_vm_stat_generic generic; u64 pages; u64 hugepages; - u64 ipi_read_exits; - u64 ipi_write_exits; - u64 eiointc_read_exits; - u64 eiointc_write_exits; - u64 pch_pic_read_exits; - u64 pch_pic_write_exits; }; struct kvm_vcpu_stat { @@ -65,6 +59,12 @@ struct kvm_vcpu_stat { u64 cpucfg_exits; u64 signal_exits; u64 hypercall_exits; + u64 ipi_read_exits; + u64 ipi_write_exits; + u64 eiointc_read_exits; + u64 eiointc_write_exits; + u64 pch_pic_read_exits; + u64 pch_pic_write_exits; }; #define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0) @@ -301,7 +301,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); /* MMU handling */ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); -int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 2c349f961bfbc..f1efd7cfbc20f 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -37,7 +37,7 @@ #define KVM_LOONGSON_IRQ_NUM_MASK 0xffff typedef union loongarch_instruction larch_inst; -typedef int 
(*exit_handle_fn)(struct kvm_vcpu *); +typedef int (*exit_handle_fn)(struct kvm_vcpu *, int); int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 52651aa0e5834..09dfd7eb406e7 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -9,15 +9,15 @@ #include <linux/linkage.h> #include <linux/types.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <larchintrin.h> /* CPUCFG */ #define read_cpucfg(reg) __cpucfg(reg) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ /* LoongArch Registers */ #define REG_ZERO 0x0 @@ -53,7 +53,7 @@ #define REG_S7 0x1e #define REG_S8 0x1f -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* Bit fields for CPUCFG registers */ #define LOONGARCH_CPUCFG0 0x0 @@ -171,7 +171,7 @@ * SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* CSR */ #define csr_read32(reg) __csrrd_w(reg) @@ -187,7 +187,7 @@ #define iocsr_write32(val, reg) __iocsrwr_w(val, reg) #define iocsr_write64(val, reg) __iocsrwr_d(val, reg) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* CSR register number */ @@ -411,8 +411,8 @@ /* Config CSR registers */ #define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */ -#define CSR_CPUID_COREID_WIDTH 9 -#define CSR_CPUID_COREID _ULCAST_(0x1ff) +#define CSR_CPUID_COREID_WIDTH 11 +#define CSR_CPUID_COREID _ULCAST_(0x7ff) #define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */ #define CSR_CONF1_VSMAX_SHIFT 12 @@ -451,6 +451,13 @@ #define LOONGARCH_CSR_KS6 0x36 #define LOONGARCH_CSR_KS7 0x37 #define LOONGARCH_CSR_KS8 0x38 +#define LOONGARCH_CSR_KS9 0x39 +#define LOONGARCH_CSR_KS10 0x3a +#define LOONGARCH_CSR_KS11 0x3b +#define LOONGARCH_CSR_KS12 0x3c +#define LOONGARCH_CSR_KS13 0x3d +#define LOONGARCH_CSR_KS14 0x3e +#define LOONGARCH_CSR_KS15 0x3f /* Exception allocated KS0, KS1 and KS2 statically */ #define EXCEPTION_KS0 LOONGARCH_CSR_KS0 @@ -1195,7 +1202,7 @@ #define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00 #define IOCSR_EXTIOI_VECTOR_NUM 256 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ static __always_inline u64 drdtime(void) { @@ -1357,7 +1364,7 @@ __BUILD_CSR_OP(tlbidx) #define clear_csr_estat(val) \ csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* Generic EntryLo bit definitions */ #define ENTRYLO_V (_ULCAST_(1) << 0) diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h index b5f9de9f102e4..bbf9f70bd25f4 100644 --- a/arch/loongarch/include/asm/numa.h +++ b/arch/loongarch/include/asm/numa.h @@ -22,20 +22,6 @@ extern int numa_off; extern s16 __cpuid_to_node[CONFIG_NR_CPUS]; extern nodemask_t numa_nodes_parsed __initdata; -struct numa_memblk { - u64 start; - u64 end; - int nid; -}; - -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) -struct numa_meminfo { - int nr_blks; - struct numa_memblk blk[NR_NODE_MEMBLKS]; -}; - -extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); - extern void __init early_numa_add_cpu(int cpuid, s16 node); extern void numa_add_cpu(unsigned int cpu); extern void numa_remove_cpu(unsigned int cpu); diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h index caf1f71a1057b..d5fa98d1d1779 100644 --- a/arch/loongarch/include/asm/orc_types.h +++ 
b/arch/loongarch/include/asm/orc_types.h @@ -34,7 +34,7 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * This struct is more or less a vastly simplified version of the DWARF Call * Frame Information standard. It contains only the necessary parts of DWARF @@ -53,6 +53,6 @@ struct orc_entry { unsigned int type:3; unsigned int signal:1; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ORC_TYPES_H */ diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h index 7368f12b7cb1e..a3aaf34fba16a 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -15,7 +15,7 @@ #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/kernel.h> #include <linux/pfn.h> @@ -110,6 +110,6 @@ extern int __virt_addr_valid(volatile void *kaddr); #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PAGE_H */ diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index b58f587f0f0ad..1c63a9d9a6d35 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -69,7 +69,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) if (!ptdesc) return NULL; - if (!pagetable_pmd_ctor(ptdesc)) { + if (!pagetable_pmd_ctor(mm, ptdesc)) { pagetable_free(ptdesc); return NULL; } diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 45bfc65a0c9f9..2fc3789220ac6 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -22,7 +22,6 @@ #define _PAGE_PFN_SHIFT 12 #define _PAGE_SWP_EXCLUSIVE_SHIFT 23 #define _PAGE_PFN_END_SHIFT 48 -#define _PAGE_DEVMAP_SHIFT 59 #define _PAGE_PRESENT_INVALID_SHIFT 60 #define _PAGE_NO_READ_SHIFT 61 #define _PAGE_NO_EXEC_SHIFT 62 @@ -36,7 +35,6 @@ #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) #define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) #define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) -#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT) /* We borrow bit 23 to store the exclusive marker in swap PTEs. 
*/ #define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT) @@ -76,8 +74,8 @@ #define __READABLE (_PAGE_VALID) #define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE) -#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) -#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) +#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) +#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \ _PAGE_USER | _CACHE_CC) @@ -92,7 +90,7 @@ #define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC) @@ -127,6 +125,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) return __pgprot(prot); } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index da346733a1dae..bd128696e96d0 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -55,7 +55,7 @@ #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/mm_types.h> #include <linux/mmzone.h> @@ -255,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp) #define pmd_page_vaddr(pmd) pmd_val(pmd) -extern pmd_t mk_pmd(struct page *page, pgprot_t prot); extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); #define pte_page(x) pfn_to_page(pte_pfn(x)) @@ -302,7 +301,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) #define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE }) -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } @@ -410,9 +409,6 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ -static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); } -static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; } - #define pte_accessible pte_accessible static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) { @@ -426,12 +422,6 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) return false; } -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. 
- */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & _PAGE_CHG_MASK) | @@ -547,17 +537,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) return pmd; } -static inline int pmd_devmap(pmd_t pmd) -{ - return !!(pmd_val(pmd) & _PAGE_DEVMAP); -} - -static inline pmd_t pmd_mkdevmap(pmd_t pmd) -{ - pmd_val(pmd) |= _PAGE_DEVMAP; - return pmd; -} - static inline struct page *pmd_page(pmd_t pmd) { if (pmd_trans_huge(pmd)) @@ -613,11 +592,6 @@ static inline long pmd_protnone(pmd_t pmd) #define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0) #define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0) -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define pud_devmap(pud) (0) -#define pgd_devmap(pgd) (0) -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - /* * We provide our own get_unmapped area to cope with the virtual aliasing * constraints placed on us by the cache architecture. @@ -625,6 +599,6 @@ static inline long pmd_protnone(pmd_t pmd) #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PGTABLE_H */ diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h index 1672262a5e2ef..0b168cdaae9a9 100644 --- a/arch/loongarch/include/asm/prefetch.h +++ b/arch/loongarch/include/asm/prefetch.h @@ -8,7 +8,7 @@ #define Pref_Load 0 #define Pref_Store 8 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro __pref hint addr #ifdef CONFIG_CPU_HAS_PREFETCH diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index b87d1d5e58905..3a47f52959a88 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -25,6 +25,7 @@ extern int smp_num_siblings; extern int num_processors; extern int disabled_cpus; extern cpumask_t cpu_sibling_map[]; +extern cpumask_t cpu_llc_shared_map[]; extern cpumask_t cpu_core_map[]; extern cpumask_t cpu_foreign_map[]; @@ -38,7 +39,7 @@ int loongson_cpu_disable(void); void loongson_cpu_die(unsigned int cpu); #endif -static inline void plat_smp_setup(void) +static inline void __init plat_smp_setup(void) { loongson_smp_setup(); } diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h index 8d4af6aff8a8f..4501efac1a876 100644 --- a/arch/loongarch/include/asm/sparsemem.h +++ b/arch/loongarch/include/asm/sparsemem.h @@ -21,11 +21,6 @@ #define VMEMMAP_SIZE 0 /* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. 
*/ #endif -#ifdef CONFIG_MEMORY_HOTPLUG -int memory_add_physaddr_to_nid(u64 addr); -#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid -#endif - #define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS) #endif /* _LOONGARCH_SPARSEMEM_H */ diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index 66736837085b6..3eda298702b19 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -57,6 +57,12 @@ jirl zero, \temp1, 0xc .endm + .macro STACKLEAK_ERASE +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + bl stackleak_erase_on_task_stack +#endif + .endm + .macro BACKUP_T0T1 csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h index fc8b64773794a..5c8be156567cd 100644 --- a/arch/loongarch/include/asm/stacktrace.h +++ b/arch/loongarch/include/asm/stacktrace.h @@ -31,6 +31,11 @@ bool in_irq_stack(unsigned long stack, struct stack_info *info); bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info); int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info); +static __always_inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + #define STR_LONG_L __stringify(LONG_L) #define STR_LONG_S __stringify(LONG_S) #define STR_LONGSIZE __stringify(LONGSIZE) diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h index e286dc58476e6..81d2733f7b949 100644 --- a/arch/loongarch/include/asm/syscall.h +++ b/arch/loongarch/include/asm/syscall.h @@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task, return regs->regs[11]; } +static inline void syscall_set_nr(struct task_struct *task, + struct pt_regs *regs, + int nr) +{ + regs->regs[11] = nr; +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { @@ -61,6 +68,14 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(&args[1], &regs->regs[5], 5 * sizeof(long)); } +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + regs->orig_a0 = args[0]; + memcpy(&regs->regs[5], &args[1], 5 * sizeof(long)); +} + static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_LOONGARCH64; } diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index 4f5a9441754e3..9dfa2ef008167 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -10,7 +10,7 @@ #ifdef __KERNEL__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/processor.h> @@ -53,7 +53,7 @@ static inline struct thread_info *current_thread_info(void) register unsigned long current_stack_pointer __asm__("$sp"); -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* thread information allocation */ #define THREAD_SIZE SZ_16K diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h index 50273c9187d02..f06e7ff25bb7c 100644 --- a/arch/loongarch/include/asm/topology.h +++ b/arch/loongarch/include/asm/topology.h @@ -19,17 +19,22 @@ extern int pcibus_to_node(struct pci_bus *); #define cpumask_of_pcibus(bus) (cpu_online_mask) -extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; - -void numa_set_distance(int from, int to, int distance); - -#define node_distance(from, to) 
(node_distances[(from)][(to)]) +int __node_distance(int from, int to); +#define node_distance(from, to) __node_distance(from, to) #else #define pcibus_to_node(bus) 0 #endif #ifdef CONFIG_SMP +/* + * Return cpus that shares the last level cache. + */ +static inline const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_llc_shared_map[cpu]; +} + #define topology_physical_package_id(cpu) (cpu_data[cpu].package) #define topology_core_id(cpu) (cpu_data[cpu].core) #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h index baf15a0dcf8b5..0edd731f3d6a0 100644 --- a/arch/loongarch/include/asm/types.h +++ b/arch/loongarch/include/asm/types.h @@ -8,7 +8,7 @@ #include <asm-generic/int-ll64.h> #include <uapi/asm/types.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define _ULCAST_ #define _U64CAST_ #else diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h index 2c68bc72736c9..16c7f7e465a0f 100644 --- a/arch/loongarch/include/asm/unwind_hints.h +++ b/arch/loongarch/include/asm/unwind_hints.h @@ -5,7 +5,7 @@ #include <linux/objtool.h> #include <asm/orc_types.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro UNWIND_HINT_UNDEFINED UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED @@ -23,7 +23,7 @@ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL .endm -#else /* !__ASSEMBLY__ */ +#else /* !__ASSEMBLER__ */ #define UNWIND_HINT_SAVE \ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) @@ -31,6 +31,6 @@ #define UNWIND_HINT_RESTORE \ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */ diff --git a/arch/loongarch/include/asm/vdso/arch_data.h b/arch/loongarch/include/asm/vdso/arch_data.h index 322d0a5f1c844..395ec223bcbed 100644 --- a/arch/loongarch/include/asm/vdso/arch_data.h +++ b/arch/loongarch/include/asm/vdso/arch_data.h @@ -7,7 +7,7 @@ #ifndef _VDSO_ARCH_DATA_H #define _VDSO_ARCH_DATA_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/asm.h> #include <asm/vdso.h> @@ -20,6 +20,6 @@ struct vdso_arch_data { struct vdso_pcpu_data pdata[NR_CPUS]; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h index 48c43f55b039b..2ff05003c6e74 100644 --- a/arch/loongarch/include/asm/vdso/getrandom.h +++ b/arch/loongarch/include/asm/vdso/getrandom.h @@ -5,7 +5,7 @@ #ifndef __ASM_VDSO_GETRANDOM_H #define __ASM_VDSO_GETRANDOM_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/unistd.h> #include <asm/vdso/vdso.h> @@ -20,7 +20,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (buffer), "r" (len), "r" (flags) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -28,6 +28,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns return ret; } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h index 88cfcf1331163..dcafabca9bb69 100644 --- a/arch/loongarch/include/asm/vdso/gettimeofday.h +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -7,7 +7,7 @@ #ifndef __ASM_VDSO_GETTIMEOFDAY_H #define __ASM_VDSO_GETTIMEOFDAY_H -#ifndef __ASSEMBLY__ +#ifndef 
__ASSEMBLER__ #include <asm/unistd.h> #include <asm/vdso/vdso.h> @@ -25,7 +25,7 @@ static __always_inline long gettimeofday_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (tv), "r" (tz) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -44,7 +44,7 @@ static __always_inline long clock_gettime_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (clkid), "r" (ts) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -63,7 +63,7 @@ static __always_inline int clock_getres_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (clkid), "r" (ts) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -89,6 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void) } #define __arch_vdso_hres_capable loongarch_vdso_hres_capable -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h index ef5770b343a01..1e255373b0b8e 100644 --- a/arch/loongarch/include/asm/vdso/processor.h +++ b/arch/loongarch/include/asm/vdso/processor.h @@ -5,10 +5,10 @@ #ifndef __ASM_VDSO_PROCESSOR_H #define __ASM_VDSO_PROCESSOR_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define cpu_relax() barrier() -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h index 50c65fb29dafb..04bd2d4528769 100644 --- a/arch/loongarch/include/asm/vdso/vdso.h +++ b/arch/loongarch/include/asm/vdso/vdso.h @@ -7,7 +7,7 @@ #ifndef _ASM_VDSO_VDSO_H #define _ASM_VDSO_VDSO_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/asm.h> #include <asm/page.h> @@ -16,6 +16,6 @@ #define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h index 1140b54b4bc82..558eb9dfda520 100644 --- a/arch/loongarch/include/asm/vdso/vsyscall.h +++ b/arch/loongarch/include/asm/vdso/vsyscall.h @@ -2,13 +2,13 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <vdso/datapage.h> /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index f9dcaa60033d9..6f5a4574a911d 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -5,7 +5,7 @@ OBJECT_FILES_NON_STANDARD_head.o := y -extra-y := vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \ diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 1120ac2824f6e..1367ca759468f 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/acpi.h> #include <linux/efi-bgrt.h> +#include <linux/export.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/memblock.h> @@ -244,22 +245,6 @@ fdt_earlycon: #ifdef CONFIG_ACPI_NUMA -static __init int setup_node(int pxm) -{ - return 
acpi_map_pxm_to_node(pxm); -} - -void __init numa_set_distance(int from, int to, int distance) -{ - if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - node_distances[from][to] = distance; -} - /* Callback for Proximity Domain -> CPUID mapping */ void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) @@ -280,7 +265,41 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) pxm |= (pa->proximity_domain_hi[1] << 16); pxm |= (pa->proximity_domain_hi[2] << 24); } - node = setup_node(pxm); + node = acpi_map_pxm_to_node(pxm); + if (node < 0) { + pr_err("SRAT: Too many proximity domains %x\n", pxm); + bad_srat(); + return; + } + + if (pa->apic_id >= CONFIG_NR_CPUS) { + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", + pxm, pa->apic_id, node); + return; + } + + early_numa_add_cpu(pa->apic_id, node); + + set_cpuid_to_node(pa->apic_id, node); + node_set(node, numa_nodes_parsed); + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node); +} + +void __init +acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) +{ + int pxm, node; + + if (srat_disabled()) + return; + if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) { + bad_srat(); + return; + } + if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) + return; + pxm = pa->proximity_domain; + node = acpi_map_pxm_to_node(pxm); if (node < 0) { pr_err("SRAT: Too many proximity domains %x\n", pxm); bad_srat(); diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c index 4ad13847e9626..0e0c766df1e35 100644 --- a/arch/loongarch/kernel/alternative.c +++ b/arch/loongarch/kernel/alternative.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/export.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/alternative.h> diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S index 5f23b85d78cad..ba0bdbf86aa85 100644 --- a/arch/loongarch/kernel/efi-header.S +++ b/arch/loongarch/kernel/efi-header.S @@ -7,7 +7,7 @@ #include <linux/sizes.h> .macro __EFI_PE_HEADER - .long PE_MAGIC + .long IMAGE_NT_SIGNATURE .Lcoff_header: .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */ .short .Lsection_count /* NumberOfSections */ @@ -20,7 +20,7 @@ IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */ .Loptional_header: - .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */ + .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */ .byte 0x02 /* MajorLinkerVersion */ .byte 0x14 /* MinorLinkerVersion */ .long __inittext_end - .Lefi_header_end /* SizeOfCode */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index de21e72759eeb..860a3bc030e06 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -144,6 +144,18 @@ void __init efi_init(void) if (efi_memmap_init_early(&data) < 0) panic("Unable to map EFI memory map.\n"); + /* + * Reserve the physical memory region occupied by the EFI + * memory map table (header + descriptors). This is crucial + * for kdump, as the kdump kernel relies on this original + * memmap passed by the bootloader. Without reservation, + * this region could be overwritten by the primary kernel. + * Also, set the EFI_PRESERVE_BS_REGIONS flag to indicate that + * critical boot services code/data regions like this are preserved. 
+ */ + memblock_reserve((phys_addr_t)boot_memmap, sizeof(*tbl) + data.size); + set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags); + early_memunmap(tbl, sizeof(*tbl)); } diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c index 0fa81ced28dcd..3d98c6aa00dbb 100644 --- a/arch/loongarch/kernel/elf.c +++ b/arch/loongarch/kernel/elf.c @@ -6,7 +6,6 @@ #include <linux/binfmts.h> #include <linux/elf.h> -#include <linux/export.h> #include <linux/sched.h> #include <asm/cpu-features.h> diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 48e7e34e355e8..47e1db9a1ce47 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -73,28 +73,29 @@ SYM_CODE_START(handle_syscall) move a0, sp bl do_syscall + STACKLEAK_ERASE RESTORE_ALL_AND_RET SYM_CODE_END(handle_syscall) _ASM_NOKPROBE(handle_syscall) -SYM_CODE_START(ret_from_fork) +SYM_CODE_START(ret_from_fork_asm) UNWIND_HINT_REGS - bl schedule_tail # a0 = struct task_struct *prev - move a0, sp - bl syscall_exit_to_user_mode + move a1, sp + bl ret_from_fork + STACKLEAK_ERASE RESTORE_STATIC RESTORE_SOME RESTORE_SP_AND_RET -SYM_CODE_END(ret_from_fork) +SYM_CODE_END(ret_from_fork_asm) -SYM_CODE_START(ret_from_kernel_thread) +SYM_CODE_START(ret_from_kernel_thread_asm) UNWIND_HINT_REGS - bl schedule_tail # a0 = struct task_struct *prev - move a0, s1 - jirl ra, s0, 0 - move a0, sp - bl syscall_exit_to_user_mode + move a1, sp + move a2, s0 + move a3, s1 + bl ret_from_kernel_thread + STACKLEAK_ERASE RESTORE_STATIC RESTORE_SOME RESTORE_SP_AND_RET -SYM_CODE_END(ret_from_kernel_thread) +SYM_CODE_END(ret_from_kernel_thread_asm) diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 27144de5c5fe4..c0a5dc9aeae28 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -39,16 +39,19 @@ void __init init_environ(void) static int __init init_cpu_fullname(void) { - struct device_node *root; int cpu, ret; - char *model; + char *cpuname; + const char *model; + struct device_node *root; /* Parsing cpuname from DTS model property */ root = of_find_node_by_path("/"); - ret = of_property_read_string(root, "model", (const char **)&model); + ret = of_property_read_string(root, "model", &model); + if (ret == 0) { + cpuname = kstrdup(model, GFP_KERNEL); + loongson_sysconf.cpuname = strsep(&cpuname, " "); + } of_node_put(root); - if (ret == 0) - loongson_sysconf.cpuname = strsep(&model, " "); if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) { for (cpu = 0; cpu < NR_CPUS; cpu++) diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index 506a99a5bbc74..e3865e92a917a 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -20,7 +20,7 @@ __HEAD _head: - .word MZ_MAGIC /* "MZ", MS-DOS header */ + .word IMAGE_DOS_SIGNATURE /* "MZ", MS-DOS header */ .org 0x8 .dword _kernel_entry /* Kernel entry point (physical address) */ .dword _kernel_asize /* Kernel image effective size */ diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index 14d7d700bcb98..72ecfed29d55a 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -4,6 +4,8 @@ */ #include <linux/sizes.h> #include <linux/uaccess.h> +#include <linux/set_memory.h> +#include <linux/stop_machine.h> #include <asm/cacheflush.h> #include <asm/inst.h> @@ -218,6 +220,50 @@ int larch_insn_patch_text(void *addr, u32 insn) return ret; } +struct insn_copy { + void *dst; + void *src; + size_t len; + unsigned int cpu; +}; + +static 
int text_copy_cb(void *data) +{ + int ret = 0; + struct insn_copy *copy = data; + + if (smp_processor_id() == copy->cpu) { + ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len); + if (ret) + pr_err("%s: operation failed\n", __func__); + } + + flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len); + + return ret; +} + +int larch_insn_text_copy(void *dst, void *src, size_t len) +{ + int ret = 0; + size_t start, end; + struct insn_copy copy = { + .dst = dst, + .src = src, + .len = len, + .cpu = smp_processor_id(), + }; + + start = round_down((size_t)dst, PAGE_SIZE); + end = round_up((size_t)dst + len, PAGE_SIZE); + + set_memory_rw(start, (end - start) / PAGE_SIZE); + ret = stop_machine(text_copy_cb, &copy, cpu_online_mask); + set_memory_rox(start, (end - start) / PAGE_SIZE); + + return ret; +} + u32 larch_insn_gen_nop(void) { return INSN_NOP; @@ -323,6 +369,34 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) return insn.word; } +u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) +{ + union loongarch_instruction insn; + + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) { + pr_warn("The generated beq instruction is out of range.\n"); + return INSN_BREAK; + } + + emit_beq(&insn, rj, rd, imm >> 2); + + return insn.word; +} + +u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) +{ + union loongarch_instruction insn; + + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) { + pr_warn("The generated bne instruction is out of range.\n"); + return INSN_BREAK; + } + + emit_bne(&insn, rj, rd, imm >> 2); + + return insn.word; +} + u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) { union loongarch_instruction insn; diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c index 4c476904227f9..141b49bd989c6 100644 --- a/arch/loongarch/kernel/kfpu.c +++ b/arch/loongarch/kernel/kfpu.c @@ -4,6 +4,7 @@ */ #include <linux/cpu.h> +#include <linux/export.h> #include <linux/init.h> #include <asm/fpu.h> #include <asm/smp.h> diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 30a72fd528c0e..d6e73e8f9c0b6 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -11,6 +11,7 @@ #include <linux/mmzone.h> #include <linux/export.h> #include <linux/nodemask.h> +#include <linux/numa_memblks.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/pfn.h> @@ -27,10 +28,6 @@ #include <asm/time.h> int numa_off; -unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; -EXPORT_SYMBOL(node_distances); - -static struct numa_meminfo numa_meminfo; cpumask_t cpus_on_node[MAX_NUMNODES]; cpumask_t phys_cpus_on_node[MAX_NUMNODES]; EXPORT_SYMBOL(cpus_on_node); @@ -43,8 +40,6 @@ s16 __cpuid_to_node[CONFIG_NR_CPUS] = { }; EXPORT_SYMBOL(__cpuid_to_node); -nodemask_t numa_nodes_parsed __initdata; - #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -145,48 +140,6 @@ void numa_remove_cpu(unsigned int cpu) cpumask_clear_cpu(cpu, &cpus_on_node[nid]); } -static int __init numa_add_memblk_to(int nid, u64 start, u64 end, - struct numa_meminfo *mi) -{ - /* ignore zero length blks */ - if (start == end) - return 0; - - /* whine about and ignore invalid blks */ - if (start > end || nid < 0 || nid >= MAX_NUMNODES) { - pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", - nid, start, end - 1); - return 0; - } - - if
(mi->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("NUMA: too many memblk ranges\n"); - return -EINVAL; - } - - mi->blk[mi->nr_blks].start = PFN_ALIGN(start); - mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1); - mi->blk[mi->nr_blks].nid = nid; - mi->nr_blks++; - return 0; -} - -/** - * numa_add_memblk - Add one numa_memblk to numa_meminfo - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * Add a new memblk to the default numa_meminfo. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - return numa_add_memblk_to(nid, start, end, &numa_meminfo); -} - static void __init node_mem_init(unsigned int node) { unsigned long start_pfn, end_pfn; @@ -205,18 +158,6 @@ static void __init node_mem_init(unsigned int node) #ifdef CONFIG_ACPI_NUMA -static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type) -{ - static unsigned long num_physpages; - - num_physpages += (size >> PAGE_SHIFT); - pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", - node, type, start, size); - pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", - start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages); - memblock_set_node(start, size, &memblock.memory, node); -} - /* * add_numamem_region * @@ -228,28 +169,21 @@ static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type */ static void __init add_numamem_region(u64 start, u64 end, u32 type) { - u32 i; - u64 ofs = start; + u32 node = pa_to_nid(start); + u64 size = end - start; + static unsigned long num_physpages; if (start >= end) { pr_debug("Invalid region: %016llx-%016llx\n", start, end); return; } - for (i = 0; i < numa_meminfo.nr_blks; i++) { - struct numa_memblk *mb = &numa_meminfo.blk[i]; - - if (ofs > mb->end) - continue; - - if (end > mb->end) { - add_node_intersection(mb->nid, ofs, mb->end - ofs, type); - ofs = mb->end; - } else { - add_node_intersection(mb->nid, ofs, end - ofs, type); - break; - } - } + num_physpages += (size >> PAGE_SHIFT); + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", + node, type, start, size); + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", + start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages); + memblock_set_node(start, size, &memblock.memory, node); } static void __init init_node_memblock(void) @@ -291,24 +225,6 @@ static void __init init_node_memblock(void) } } -static void __init numa_default_distance(void) -{ - int row, col; - - for (row = 0; row < MAX_NUMNODES; row++) - for (col = 0; col < MAX_NUMNODES; col++) { - if (col == row) - node_distances[row][col] = LOCAL_DISTANCE; - else - /* We assume that one node per package here! - * - * A SLIT should be used for multiple nodes - * per package to override default setting. - */ - node_distances[row][col] = REMOTE_DISTANCE; - } -} - /* * fake_numa_init() - For Non-ACPI systems * Return: 0 on success, -errno on failure. @@ -333,11 +249,11 @@ int __init init_numa_memory(void) for (i = 0; i < NR_CPUS; i++) set_cpuid_to_node(i, NUMA_NO_NODE); - numa_default_distance(); + numa_reset_distance(); nodes_clear(numa_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); - memset(&numa_meminfo, 0, sizeof(numa_meminfo)); + WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX)); /* Parse SRAT and SLIT if provided by firmware. */ ret = acpi_disabled ? 
fake_numa_init() : acpi_numa_init(); diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index e5a39bbad0780..b1b51f920b231 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/export.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq_work.h> diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index f86a4b838dd78..8ad0987034889 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx, if (!loongarch_pmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - loongarch_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } static irqreturn_t pmu_handle_irq(int irq, void *dev) diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 6e58f65455c7c..3582f591bab28 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -13,6 +13,7 @@ #include <linux/cpu.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/entry-common.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -34,6 +35,7 @@ #include <linux/nmi.h> #include <asm/asm.h> +#include <asm/asm-prototypes.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/elf.h> @@ -47,6 +49,7 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/reg.h> +#include <asm/switch_to.h> #include <asm/unwind.h> #include <asm/vdso.h> @@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard); unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); -asmlinkage void ret_from_fork(void); -asmlinkage void ret_from_kernel_thread(void); +asmlinkage void restore_and_ret(void); +asmlinkage void ret_from_fork_asm(void); +asmlinkage void ret_from_kernel_thread_asm(void); void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { @@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } +asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev, + struct pt_regs *regs) +{ + schedule_tail(prev); + syscall_exit_to_user_mode(regs); +} + +asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev, + struct pt_regs *regs, + int (*fn)(void *), + void *fn_arg) +{ + schedule_tail(prev); + fn(fn_arg); + syscall_exit_to_user_mode(regs); +} + /* * Copy architecture-specific thread state */ @@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.reg03 = childksp; p->thread.reg23 = (unsigned long)args->fn; p->thread.reg24 = (unsigned long)args->fn_arg; - p->thread.reg01 = (unsigned long)ret_from_kernel_thread; - p->thread.sched_ra = (unsigned long)ret_from_kernel_thread; + p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm; + p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm; memset(childregs, 0, sizeof(struct pt_regs)); childregs->csr_euen = p->thread.csr_euen; childregs->csr_crmd = p->thread.csr_crmd; @@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) childregs->regs[3] = usp; p->thread.reg03 = (unsigned long) childregs; - p->thread.reg01 = (unsigned long) ret_from_fork; - p->thread.sched_ra = (unsigned long) 
ret_from_fork; + p->thread.reg01 = (unsigned long) ret_from_fork_asm; + p->thread.sched_ra = (unsigned long) ret_from_fork_asm; /* * New tasks lose permission to use the fpu. This accelerates context diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 5e2402cfcab0a..8edd0954e55ab 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -864,7 +864,7 @@ enum loongarch_regset { static const struct user_regset loongarch64_regsets[] = { [REGSET_GPR] = { - .core_note_type = NT_PRSTATUS, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), @@ -872,7 +872,7 @@ static const struct user_regset loongarch64_regsets[] = { .set = gpr_set, }, [REGSET_FPR] = { - .core_note_type = NT_PRFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), @@ -880,7 +880,7 @@ static const struct user_regset loongarch64_regsets[] = { .set = fpr_set, }, [REGSET_CPUCFG] = { - .core_note_type = NT_LOONGARCH_CPUCFG, + USER_REGSET_NOTE_TYPE(LOONGARCH_CPUCFG), .n = 64, .size = sizeof(u32), .align = sizeof(u32), @@ -889,7 +889,7 @@ static const struct user_regset loongarch64_regsets[] = { }, #ifdef CONFIG_CPU_HAS_LSX [REGSET_LSX] = { - .core_note_type = NT_LOONGARCH_LSX, + USER_REGSET_NOTE_TYPE(LOONGARCH_LSX), .n = NUM_FPU_REGS, .size = 16, .align = 16, @@ -899,7 +899,7 @@ static const struct user_regset loongarch64_regsets[] = { #endif #ifdef CONFIG_CPU_HAS_LASX [REGSET_LASX] = { - .core_note_type = NT_LOONGARCH_LASX, + USER_REGSET_NOTE_TYPE(LOONGARCH_LASX), .n = NUM_FPU_REGS, .size = 32, .align = 32, @@ -909,7 +909,7 @@ static const struct user_regset loongarch64_regsets[] = { #endif #ifdef CONFIG_CPU_HAS_LBT [REGSET_LBT] = { - .core_note_type = NT_LOONGARCH_LBT, + USER_REGSET_NOTE_TYPE(LOONGARCH_LBT), .n = 5, .size = sizeof(u64), .align = sizeof(u64), @@ -919,7 +919,7 @@ static const struct user_regset loongarch64_regsets[] = { #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT [REGSET_HW_BREAK] = { - .core_note_type = NT_LOONGARCH_HW_BREAK, + USER_REGSET_NOTE_TYPE(LOONGARCH_HW_BREAK), .n = sizeof(struct user_watch_state_v2) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), @@ -927,7 +927,7 @@ static const struct user_regset loongarch64_regsets[] = { .set = hw_break_set, }, [REGSET_HW_WATCH] = { - .core_note_type = NT_LOONGARCH_HW_WATCH, + USER_REGSET_NOTE_TYPE(LOONGARCH_HW_WATCH), .n = sizeof(struct user_watch_state_v2) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S index 84e6de2fd9735..8b5140ac9ea11 100644 --- a/arch/loongarch/kernel/relocate_kernel.S +++ b/arch/loongarch/kernel/relocate_kernel.S @@ -109,4 +109,4 @@ SYM_CODE_END(kexec_smp_wait) relocate_new_kernel_end: .section ".data" -SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel) +SYM_DATA(relocate_new_kernel_size, .quad relocate_new_kernel_end - relocate_new_kernel) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index b99fbb388fe03..075b79b2c1d39 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -191,6 +191,16 @@ static int __init early_parse_mem(char *p) return -EINVAL; } + start = 0; + size = memparse(p, &p); + if (*p == '@') /* Every mem=... should contain '@' */ + start = memparse(p + 1, &p); + else { /* Only one mem=... 
is allowed if no '@' */ + usermem = 1; + memblock_enforce_memory_limit(size); + return 0; + } + /* * If a user specifies memory size, we * blow away any automatically generated @@ -201,14 +211,6 @@ static int __init early_parse_mem(char *p) memblock_remove(memblock_start_of_DRAM(), memblock_end_of_DRAM() - memblock_start_of_DRAM()); } - start = 0; - size = memparse(p, &p); - if (*p == '@') - start = memparse(p + 1, &p); - else { - pr_err("Invalid format!\n"); - return -EINVAL; - } if (!IS_ENABLED(CONFIG_NUMA)) memblock_add(start, size); @@ -265,7 +267,7 @@ static void __init arch_reserve_crashkernel(void) return; ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), - &crash_size, &crash_base, &low_size, &high); + &crash_size, &crash_base, &low_size, NULL, &high); if (ret) return; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 4b24589c0b565..46036d98da75b 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -46,6 +46,10 @@ EXPORT_SYMBOL(__cpu_logical_map); cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); +/* Representing the last level cache shared map of each logical CPU */ +cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_llc_shared_map); + /* Representing the core map of multi-core chips of each logical CPU */ cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); @@ -63,6 +67,9 @@ EXPORT_SYMBOL(cpu_foreign_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +/* representing cpus for which llc shared maps can be computed */ +static cpumask_t cpu_llc_shared_setup_map; + /* representing cpus for which core maps can be computed */ static cpumask_t cpu_core_setup_map; @@ -102,6 +109,34 @@ static inline void set_cpu_core_map(int cpu) } } +static inline void set_cpu_llc_shared_map(int cpu) +{ + int i; + + cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map); + + for_each_cpu(i, &cpu_llc_shared_setup_map) { + if (cpu_to_node(cpu) == cpu_to_node(i)) { + cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]); + cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]); + } + } +} + +static inline void clear_cpu_llc_shared_map(int cpu) +{ + int i; + + for_each_cpu(i, &cpu_llc_shared_setup_map) { + if (cpu_to_node(cpu) == cpu_to_node(i)) { + cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]); + cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]); + } + } + + cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map); +} + static inline void set_cpu_sibling_map(int cpu) { int i; @@ -406,6 +441,7 @@ int loongson_cpu_disable(void) #endif set_cpu_online(cpu, false); clear_cpu_sibling_map(cpu); + clear_cpu_llc_shared_map(cpu); calculate_cpu_foreign_map(); local_irq_save(flags); irq_migrate_all_off_this_cpu(); @@ -572,6 +608,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) current_thread_info()->cpu = 0; loongson_prepare_cpus(max_cpus); set_cpu_sibling_map(0); + set_cpu_llc_shared_map(0); set_cpu_core_map(0); calculate_cpu_foreign_map(); #ifndef CONFIG_HOTPLUG_CPU @@ -613,6 +650,7 @@ asmlinkage void start_secondary(void) loongson_init_secondary(); set_cpu_sibling_map(cpu); + set_cpu_llc_shared_map(cpu); set_cpu_core_map(cpu); notify_cpu_starting(cpu); diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index bc75a3a69fc8d..367906b10f810 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -102,7 +102,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev return 
0; } -static unsigned long __init get_loops_per_jiffy(void) +static unsigned long get_loops_per_jiffy(void) { unsigned long lpj = (unsigned long)const_clock_freq; diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index 47fc2de6d1501..3d9be6ca7ec55 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/kexec.h> #include <linux/module.h> +#include <linux/export.h> #include <linux/extable.h> #include <linux/mm.h> #include <linux/sched/mm.h> diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c index 98379b7d41475..08d7951b2f604 100644 --- a/arch/loongarch/kernel/unwind_guess.c +++ b/arch/loongarch/kernel/unwind_guess.c @@ -3,6 +3,7 @@ * Copyright (C) 2022 Loongson Technology Corporation Limited */ #include <asm/unwind.h> +#include <linux/export.h> unsigned long unwind_get_return_address(struct unwind_state *state) { diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c index d623935a75471..0d5fa64a22252 100644 --- a/arch/loongarch/kernel/unwind_orc.c +++ b/arch/loongarch/kernel/unwind_orc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <linux/objtool.h> +#include <linux/export.h> #include <linux/module.h> +#include <linux/objtool.h> #include <linux/sort.h> #include <asm/exception.h> #include <asm/orc_header.h> @@ -507,7 +508,7 @@ bool unwind_next_frame(struct unwind_state *state) state->pc = bt_address(pc); if (!state->pc) { - pr_err("cannot find unwind pc at %pK\n", (void *)pc); + pr_err("cannot find unwind pc at %p\n", (void *)pc); goto err; } diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c index 929ae240280a5..729e775bd40dd 100644 --- a/arch/loongarch/kernel/unwind_prologue.c +++ b/arch/loongarch/kernel/unwind_prologue.c @@ -3,6 +3,7 @@ * Copyright (C) 2022 Loongson Technology Corporation Limited */ #include <linux/cpumask.h> +#include <linux/export.h> #include <linux/ftrace.h> #include <linux/kallsyms.h> diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index 10cf1608c7b32..7b888d9085a01 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -105,7 +105,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_addr = data_addr + VVAR_SIZE; vma = _install_special_mapping(mm, vdso_addr, info->size, - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | + VM_SEALED_SYSMAP, &info->code_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ea321403644ad..2ce41f93b2a44 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -289,9 +289,11 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) er = EMULATE_FAIL; switch (((inst.word >> 24) & 0xff)) { case 0x0: /* CPUCFG GSPR */ + trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG); er = kvm_emu_cpucfg(vcpu, inst); break; case 0x4: /* CSR{RD,WR,XCHG} GSPR */ + trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR); er = kvm_handle_csr(vcpu, inst); break; case 0x6: /* Cache, Idle and IOCSR GSPR */ @@ -341,7 +343,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) * 2) Execute CACOP/IDLE instructions; * 3) Access to unimplemented CSRs/IOCSRs. 
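The surrounding hunks thread the exception code into every exit handler, so the final dispatch in kvm_handle_fault() can index its table and pass the fault number straight through instead of each handler re-deriving it from the saved ESTAT. A minimal stand-alone sketch of that table-driven shape, with invented handler names and code values:

#include <stddef.h>
#include <stdio.h>

enum { EXC_FPD = 0, EXC_TLBM = 1, EXC_MAX = 8 };	/* made-up codes */

struct vcpu;	/* opaque here; only passed through */

/* Every handler now receives the exception code alongside the vCPU. */
typedef int (*exit_handler_fn)(struct vcpu *vcpu, int ecode);

static int handle_fpu_disabled(struct vcpu *vcpu, int ecode)
{
	(void)vcpu;
	printf("enable FPU for the guest (ecode %d)\n", ecode);
	return 0;
}

static int handle_not_implemented(struct vcpu *vcpu, int ecode)
{
	(void)vcpu;
	printf("unhandled exit, ecode %d\n", ecode);
	return -1;
}

/* Default everything, then override specific codes (GNU range initializer). */
static exit_handler_fn handlers[EXC_MAX] = {
	[0 ... EXC_MAX - 1] = handle_not_implemented,
	[EXC_FPD] = handle_fpu_disabled,
};

/* Mirrors kvm_handle_fault(): look up by fault code and forward it. */
static int dispatch(struct vcpu *vcpu, int fault)
{
	return handlers[fault](vcpu, fault);
}

int main(void)
{
	dispatch(NULL, EXC_FPD);
	dispatch(NULL, EXC_TLBM);
	return 0;
}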
*/ -static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode) { int ret = RESUME_GUEST; enum emulation_result er = EMULATE_DONE; @@ -661,7 +663,7 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) return ret; } -static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode) { int ret; larch_inst inst; @@ -675,7 +677,7 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) return RESUME_GUEST; } - ret = kvm_handle_mm_fault(vcpu, badv, write); + ret = kvm_handle_mm_fault(vcpu, badv, write, ecode); if (ret) { /* Treat as MMIO */ inst.word = vcpu->arch.badi; @@ -705,14 +707,14 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) return ret; } -static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode) { - return kvm_handle_rdwr_fault(vcpu, false); + return kvm_handle_rdwr_fault(vcpu, false, ecode); } -static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode) { - return kvm_handle_rdwr_fault(vcpu, true); + return kvm_handle_rdwr_fault(vcpu, true, ecode); } int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -726,11 +728,12 @@ int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run) /** * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use fpu which hasn't been allowed * by the root context. */ -static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode) { struct kvm_run *run = vcpu->run; @@ -783,11 +786,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) /* * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use LSX when it is disabled in the root * context. */ -static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lsx(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -798,11 +802,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) /* * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use LASX when it is disabled in the root * context. 
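kvm_handle_rdwr_fault() above tries the ordinary guest-page-fault path first and only falls back to decoding the faulting instruction as MMIO when that fails. A rough stand-alone sketch of that two-step shape, with stub helpers standing in for the real fault handling and emulation:

#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend addresses below 4 KiB have no memory slot behind them. */
static int try_gpa_fault(unsigned long badv, bool write)
{
	(void)write;
	return badv < 0x1000 ? -1 : 0;
}

/* Stub for instruction decode plus MMIO emulation. */
static int emulate_mmio(unsigned long badv, bool write)
{
	printf("emulate %s MMIO access at %#lx\n", write ? "write" : "read", badv);
	return 0;
}

/* Resolve as a normal page fault first; on failure, treat it as MMIO. */
static int handle_rdwr_fault(unsigned long badv, bool write)
{
	if (try_gpa_fault(badv, write) == 0)
		return 0;	/* mapped: just resume the guest */

	return emulate_mmio(badv, write);
}

int main(void)
{
	handle_rdwr_fault(0x200, true);			/* falls back to MMIO */
	handle_rdwr_fault(0x90000000UL, false);		/* handled as a page fault */
	return 0;
}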
*/ -static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lasx(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -810,7 +815,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lbt(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -818,32 +823,25 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) +static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu) { - unsigned int min, cpu, i; - unsigned long ipi_bitmap; + unsigned int min, cpu; struct kvm_vcpu *dest; + DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = { + kvm_read_reg(vcpu, LOONGARCH_GPR_A1), + kvm_read_reg(vcpu, LOONGARCH_GPR_A2) + }; min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3); - for (i = 0; i < 2; i++, min += BITS_PER_LONG) { - ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i); - if (!ipi_bitmap) + for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) { + dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); + if (!dest) continue; - cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG); - while (cpu < BITS_PER_LONG) { - dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); - cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1); - if (!dest) - continue; - - /* Send SWI0 to dest vcpu to emulate IPI interrupt */ - kvm_queue_irq(dest, INT_SWI0); - kvm_vcpu_kick(dest); - } + /* Send SWI0 to dest vcpu to emulate IPI interrupt */ + kvm_queue_irq(dest, INT_SWI0); + kvm_vcpu_kick(dest); } - - return 0; } /* @@ -872,7 +870,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu) kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); } -static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode) { int ret; larch_inst inst; @@ -932,16 +930,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) /* * LoongArch KVM callback handling for unimplemented guest exiting */ -static int kvm_fault_ni(struct kvm_vcpu *vcpu) +static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode) { - unsigned int ecode, inst; - unsigned long estat, badv; + unsigned int inst; + unsigned long badv; /* Fetch the instruction */ inst = vcpu->arch.badi; badv = vcpu->arch.badv; - estat = vcpu->arch.host_estat; - ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); kvm_arch_vcpu_dump_regs(vcpu); @@ -966,5 +962,5 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) { - return kvm_fault_tables[fault](vcpu); + return kvm_fault_tables[fault](vcpu, fault); } diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c index f39929d7bf8a2..a3a12af9ecbff 100644 --- a/arch/loongarch/kvm/intc/eiointc.c +++ b/arch/loongarch/kvm/intc/eiointc.c @@ -9,7 +9,8 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s) { - int ipnum, cpu, irq_index, irq_mask, irq; + int ipnum, cpu, cpuid, irq; + struct kvm_vcpu *vcpu; for (irq = 0; irq < EIOINTC_IRQS; irq++) { ipnum = s->ipmap.reg_u8[irq / 32]; @@ -17,20 +18,23 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s) ipnum = count_trailing_zeros(ipnum); ipnum = (ipnum >= 0 && ipnum < 4) ? 
ipnum : 0; } - irq_index = irq / 32; - irq_mask = BIT(irq & 0x1f); - cpu = s->coremap.reg_u8[irq]; - if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask)) - set_bit(irq, s->sw_coreisr[cpu][ipnum]); + cpuid = s->coremap.reg_u8[irq]; + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); + if (!vcpu) + continue; + + cpu = vcpu->vcpu_id; + if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu])) + __set_bit(irq, s->sw_coreisr[cpu][ipnum]); else - clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + __clear_bit(irq, s->sw_coreisr[cpu][ipnum]); } } static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level) { - int ipnum, cpu, found, irq_index, irq_mask; + int ipnum, cpu, found; struct kvm_vcpu *vcpu; struct kvm_interrupt vcpu_irq; @@ -42,19 +46,16 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level) cpu = s->sw_coremap[irq]; vcpu = kvm_get_vcpu(s->kvm, cpu); - irq_index = irq / 32; - irq_mask = BIT(irq & 0x1f); - if (level) { /* if not enable return false */ - if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0) + if (!test_bit(irq, (unsigned long *)s->enable.reg_u32)) return; - s->coreisr.reg_u32[cpu][irq_index] |= irq_mask; + __set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]); found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS); - set_bit(irq, s->sw_coreisr[cpu][ipnum]); + __set_bit(irq, s->sw_coreisr[cpu][ipnum]); } else { - s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask; - clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + __clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]); + __clear_bit(irq, s->sw_coreisr[cpu][ipnum]); found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS); } @@ -66,20 +67,25 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level) } static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s, - int irq, void *pvalue, u32 len, bool notify) + int irq, u64 val, u32 len, bool notify) { - int i, cpu; - u64 val = *(u64 *)pvalue; + int i, cpu, cpuid; + struct kvm_vcpu *vcpu; for (i = 0; i < len; i++) { - cpu = val & 0xff; + cpuid = val & 0xff; val = val >> 8; if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) { - cpu = ffs(cpu) - 1; - cpu = (cpu >= 4) ? 0 : cpu; + cpuid = ffs(cpuid) - 1; + cpuid = (cpuid >= 4) ? 0 : cpuid; } + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); + if (!vcpu) + continue; + + cpu = vcpu->vcpu_id; if (s->sw_coremap[irq + i] == cpu) continue; @@ -99,159 +105,14 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level) unsigned long flags; unsigned long *isr = (unsigned long *)s->isr.reg_u8; - level ? set_bit(irq, isr) : clear_bit(irq, isr); spin_lock_irqsave(&s->lock, flags); + level ? __set_bit(irq, isr) : __clear_bit(irq, isr); eiointc_update_irq(s, irq, level); spin_unlock_irqrestore(&s->lock, flags); } -static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu, - struct loongarch_eiointc *s, int index, u8 mask, int level) -{ - u8 val; - int irq; - - val = mask & s->isr.reg_u8[index]; - irq = ffs(val); - while (irq != 0) { - /* - * enable bit change from 0 to 1, - * need to update irq by pending bits - */ - eiointc_update_irq(s, irq - 1 + index * 8, level); - val &= ~BIT(irq - 1); - irq = ffs(val); - } -} - -static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, - gpa_t addr, int len, void *val) -{ - int index, ret = 0; - u8 data = 0; - gpa_t offset; - - offset = addr - EIOINTC_BASE; - switch (offset) { - case EIOINTC_NODETYPE_START ... 
EIOINTC_NODETYPE_END: - index = offset - EIOINTC_NODETYPE_START; - data = s->nodetype.reg_u8[index]; - break; - case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: - index = offset - EIOINTC_IPMAP_START; - data = s->ipmap.reg_u8[index]; - break; - case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: - index = offset - EIOINTC_ENABLE_START; - data = s->enable.reg_u8[index]; - break; - case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: - index = offset - EIOINTC_BOUNCE_START; - data = s->bounce.reg_u8[index]; - break; - case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: - index = offset - EIOINTC_COREISR_START; - data = s->coreisr.reg_u8[vcpu->vcpu_id][index]; - break; - case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: - index = offset - EIOINTC_COREMAP_START; - data = s->coremap.reg_u8[index]; - break; - default: - ret = -EINVAL; - break; - } - *(u8 *)val = data; - - return ret; -} - -static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, - gpa_t addr, int len, void *val) -{ - int index, ret = 0; - u16 data = 0; - gpa_t offset; - - offset = addr - EIOINTC_BASE; - switch (offset) { - case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: - index = (offset - EIOINTC_NODETYPE_START) >> 1; - data = s->nodetype.reg_u16[index]; - break; - case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: - index = (offset - EIOINTC_IPMAP_START) >> 1; - data = s->ipmap.reg_u16[index]; - break; - case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: - index = (offset - EIOINTC_ENABLE_START) >> 1; - data = s->enable.reg_u16[index]; - break; - case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: - index = (offset - EIOINTC_BOUNCE_START) >> 1; - data = s->bounce.reg_u16[index]; - break; - case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: - index = (offset - EIOINTC_COREISR_START) >> 1; - data = s->coreisr.reg_u16[vcpu->vcpu_id][index]; - break; - case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: - index = (offset - EIOINTC_COREMAP_START) >> 1; - data = s->coremap.reg_u16[index]; - break; - default: - ret = -EINVAL; - break; - } - *(u16 *)val = data; - - return ret; -} - -static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, - gpa_t addr, int len, void *val) -{ - int index, ret = 0; - u32 data = 0; - gpa_t offset; - - offset = addr - EIOINTC_BASE; - switch (offset) { - case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: - index = (offset - EIOINTC_NODETYPE_START) >> 2; - data = s->nodetype.reg_u32[index]; - break; - case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: - index = (offset - EIOINTC_IPMAP_START) >> 2; - data = s->ipmap.reg_u32[index]; - break; - case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: - index = (offset - EIOINTC_ENABLE_START) >> 2; - data = s->enable.reg_u32[index]; - break; - case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: - index = (offset - EIOINTC_BOUNCE_START) >> 2; - data = s->bounce.reg_u32[index]; - break; - case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: - index = (offset - EIOINTC_COREISR_START) >> 2; - data = s->coreisr.reg_u32[vcpu->vcpu_id][index]; - break; - case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: - index = (offset - EIOINTC_COREMAP_START) >> 2; - data = s->coremap.reg_u32[index]; - break; - default: - ret = -EINVAL; - break; - } - *(u32 *)val = data; - - return ret; -} - -static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, - gpa_t addr, int len, void *val) +static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, + gpa_t addr, unsigned long *val) { int index, ret = 0; u64 data = 0; @@ -287,7 +148,7 @@ static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eioin ret = -EINVAL; break; } - *(u64 *)val = data; + *val = data; return ret; } @@ -297,7 +158,7 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val) { int ret = -EINVAL; - unsigned long flags; + unsigned long flags, data, offset; struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; if (!eiointc) { @@ -305,358 +166,120 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, return -EINVAL; } - vcpu->kvm->stat.eiointc_read_exits++; + if (addr & (len - 1)) { + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + + offset = addr & 0x7; + addr -= offset; + vcpu->stat.eiointc_read_exits++; spin_lock_irqsave(&eiointc->lock, flags); + ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data); + spin_unlock_irqrestore(&eiointc->lock, flags); + if (ret) + return ret; + + data = data >> (offset * 8); switch (len) { case 1: - ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val); + *(long *)val = (s8)data; break; case 2: - ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val); + *(long *)val = (s16)data; break; case 4: - ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val); - break; - case 8: - ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val); + *(long *)val = (s32)data; break; default: - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", - __func__, addr, len); - } - spin_unlock_irqrestore(&eiointc->lock, flags); - - return ret; -} - -static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu, - struct loongarch_eiointc *s, - gpa_t addr, int len, const void *val) -{ - int index, irq, bits, ret = 0; - u8 cpu; - u8 data, old_data; - u8 coreisr, old_coreisr; - gpa_t offset; - - data = *(u8 *)val; - offset = addr - EIOINTC_BASE; - - switch (offset) { - case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: - index = (offset - EIOINTC_NODETYPE_START); - s->nodetype.reg_u8[index] = data; - break; - case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: - /* - * ipmap cannot be set at runtime, can be set only at the beginning - * of irqchip driver, need not update upper irq level - */ - index = (offset - EIOINTC_IPMAP_START); - s->ipmap.reg_u8[index] = data; - break; - case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: - index = (offset - EIOINTC_ENABLE_START); - old_data = s->enable.reg_u8[index]; - s->enable.reg_u8[index] = data; - /* - * 1: enable irq. - * update irq when isr is set. - */ - data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index]; - eiointc_enable_irq(vcpu, s, index, data, 1); - /* - * 0: disable irq. - * update irq when isr is set. - */ - data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index]; - eiointc_enable_irq(vcpu, s, index, data, 0); - break; - case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: - /* do not emulate hw bounced irq routing */ - index = offset - EIOINTC_BOUNCE_START; - s->bounce.reg_u8[index] = data; - break; - case EIOINTC_COREISR_START ... 
EIOINTC_COREISR_END: - index = (offset - EIOINTC_COREISR_START); - /* use attrs to get current cpu index */ - cpu = vcpu->vcpu_id; - coreisr = data; - old_coreisr = s->coreisr.reg_u8[cpu][index]; - /* write 1 to clear interrupt */ - s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr; - coreisr &= old_coreisr; - bits = sizeof(data) * 8; - irq = find_first_bit((void *)&coreisr, bits); - while (irq < bits) { - eiointc_update_irq(s, irq + index * bits, 0); - bitmap_clear((void *)&coreisr, irq, 1); - irq = find_first_bit((void *)&coreisr, bits); - } - break; - case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: - irq = offset - EIOINTC_COREMAP_START; - index = irq; - s->coremap.reg_u8[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); - break; - default: - ret = -EINVAL; + *(long *)val = (long)data; break; } - return ret; -} - -static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu, - struct loongarch_eiointc *s, - gpa_t addr, int len, const void *val) -{ - int i, index, irq, bits, ret = 0; - u8 cpu; - u16 data, old_data; - u16 coreisr, old_coreisr; - gpa_t offset; - - data = *(u16 *)val; - offset = addr - EIOINTC_BASE; - - switch (offset) { - case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: - index = (offset - EIOINTC_NODETYPE_START) >> 1; - s->nodetype.reg_u16[index] = data; - break; - case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: - /* - * ipmap cannot be set at runtime, can be set only at the beginning - * of irqchip driver, need not update upper irq level - */ - index = (offset - EIOINTC_IPMAP_START) >> 1; - s->ipmap.reg_u16[index] = data; - break; - case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: - index = (offset - EIOINTC_ENABLE_START) >> 1; - old_data = s->enable.reg_u32[index]; - s->enable.reg_u16[index] = data; - /* - * 1: enable irq. - * update irq when isr is set. - */ - data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index]; - index = index << 1; - for (i = 0; i < sizeof(data); i++) { - u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index + i, mask, 1); - } - /* - * 0: disable irq. - * update irq when isr is set. - */ - data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index]; - for (i = 0; i < sizeof(data); i++) { - u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index, mask, 0); - } - break; - case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: - /* do not emulate hw bounced irq routing */ - index = (offset - EIOINTC_BOUNCE_START) >> 1; - s->bounce.reg_u16[index] = data; - break; - case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: - index = (offset - EIOINTC_COREISR_START) >> 1; - /* use attrs to get current cpu index */ - cpu = vcpu->vcpu_id; - coreisr = data; - old_coreisr = s->coreisr.reg_u16[cpu][index]; - /* write 1 to clear interrupt */ - s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr; - coreisr &= old_coreisr; - bits = sizeof(data) * 8; - irq = find_first_bit((void *)&coreisr, bits); - while (irq < bits) { - eiointc_update_irq(s, irq + index * bits, 0); - bitmap_clear((void *)&coreisr, irq, 1); - irq = find_first_bit((void *)&coreisr, bits); - } - break; - case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: - irq = offset - EIOINTC_COREMAP_START; - index = irq >> 1; - s->coremap.reg_u16[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); - break; - default: - ret = -EINVAL; - break; - } - - return ret; + return 0; } -static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu, +static int loongarch_eiointc_write(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, - gpa_t addr, int len, const void *val) + gpa_t addr, u64 value, u64 field_mask) { - int i, index, irq, bits, ret = 0; + int index, irq, ret = 0; u8 cpu; - u32 data, old_data; - u32 coreisr, old_coreisr; + u64 data, old, mask; gpa_t offset; - data = *(u32 *)val; - offset = addr - EIOINTC_BASE; + offset = addr & 7; + mask = field_mask << (offset * 8); + data = (value & field_mask) << (offset * 8); - switch (offset) { - case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: - index = (offset - EIOINTC_NODETYPE_START) >> 2; - s->nodetype.reg_u32[index] = data; - break; - case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: - /* - * ipmap cannot be set at runtime, can be set only at the beginning - * of irqchip driver, need not update upper irq level - */ - index = (offset - EIOINTC_IPMAP_START) >> 2; - s->ipmap.reg_u32[index] = data; - break; - case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: - index = (offset - EIOINTC_ENABLE_START) >> 2; - old_data = s->enable.reg_u32[index]; - s->enable.reg_u32[index] = data; - /* - * 1: enable irq. - * update irq when isr is set. - */ - data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index]; - index = index << 2; - for (i = 0; i < sizeof(data); i++) { - u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index + i, mask, 1); - } - /* - * 0: disable irq. - * update irq when isr is set. - */ - data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index]; - for (i = 0; i < sizeof(data); i++) { - u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index, mask, 0); - } - break; - case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: - /* do not emulate hw bounced irq routing */ - index = (offset - EIOINTC_BOUNCE_START) >> 2; - s->bounce.reg_u32[index] = data; - break; - case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: - index = (offset - EIOINTC_COREISR_START) >> 2; - /* use attrs to get current cpu index */ - cpu = vcpu->vcpu_id; - coreisr = data; - old_coreisr = s->coreisr.reg_u32[cpu][index]; - /* write 1 to clear interrupt */ - s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr; - coreisr &= old_coreisr; - bits = sizeof(data) * 8; - irq = find_first_bit((void *)&coreisr, bits); - while (irq < bits) { - eiointc_update_irq(s, irq + index * bits, 0); - bitmap_clear((void *)&coreisr, irq, 1); - irq = find_first_bit((void *)&coreisr, bits); - } - break; - case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: - irq = offset - EIOINTC_COREMAP_START; - index = irq >> 2; - s->coremap.reg_u32[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu, - struct loongarch_eiointc *s, - gpa_t addr, int len, const void *val) -{ - int i, index, irq, bits, ret = 0; - u8 cpu; - u64 data, old_data; - u64 coreisr, old_coreisr; - gpa_t offset; - - data = *(u64 *)val; + addr -= offset; offset = addr - EIOINTC_BASE; switch (offset) { case EIOINTC_NODETYPE_START ... 
EIOINTC_NODETYPE_END: index = (offset - EIOINTC_NODETYPE_START) >> 3; - s->nodetype.reg_u64[index] = data; + old = s->nodetype.reg_u64[index]; + s->nodetype.reg_u64[index] = (old & ~mask) | data; break; case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: /* * ipmap cannot be set at runtime, can be set only at the beginning * of irqchip driver, need not update upper irq level */ - index = (offset - EIOINTC_IPMAP_START) >> 3; - s->ipmap.reg_u64 = data; + old = s->ipmap.reg_u64; + s->ipmap.reg_u64 = (old & ~mask) | data; break; case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: index = (offset - EIOINTC_ENABLE_START) >> 3; - old_data = s->enable.reg_u64[index]; - s->enable.reg_u64[index] = data; + old = s->enable.reg_u64[index]; + s->enable.reg_u64[index] = (old & ~mask) | data; /* * 1: enable irq. * update irq when isr is set. */ - data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index]; - index = index << 3; - for (i = 0; i < sizeof(data); i++) { - u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index + i, mask, 1); + data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index]; + while (data) { + irq = __ffs(data); + eiointc_update_irq(s, irq + index * 64, 1); + data &= ~BIT_ULL(irq); } /* * 0: disable irq. * update irq when isr is set. */ - data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index]; - for (i = 0; i < sizeof(data); i++) { - u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index, mask, 0); + data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index]; + while (data) { + irq = __ffs(data); + eiointc_update_irq(s, irq + index * 64, 0); + data &= ~BIT_ULL(irq); } break; case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: /* do not emulate hw bounced irq routing */ index = (offset - EIOINTC_BOUNCE_START) >> 3; - s->bounce.reg_u64[index] = data; + old = s->bounce.reg_u64[index]; + s->bounce.reg_u64[index] = (old & ~mask) | data; break; case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: index = (offset - EIOINTC_COREISR_START) >> 3; /* use attrs to get current cpu index */ cpu = vcpu->vcpu_id; - coreisr = data; - old_coreisr = s->coreisr.reg_u64[cpu][index]; + old = s->coreisr.reg_u64[cpu][index]; /* write 1 to clear interrupt */ - s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr; - coreisr &= old_coreisr; - bits = sizeof(data) * 8; - irq = find_first_bit((void *)&coreisr, bits); - while (irq < bits) { - eiointc_update_irq(s, irq + index * bits, 0); - bitmap_clear((void *)&coreisr, irq, 1); - irq = find_first_bit((void *)&coreisr, bits); + s->coreisr.reg_u64[cpu][index] = old & ~data; + data &= old; + while (data) { + irq = __ffs(data); + eiointc_update_irq(s, irq + index * 64, 0); + data &= ~BIT_ULL(irq); } break; case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: - irq = offset - EIOINTC_COREMAP_START; - index = irq >> 3; - s->coremap.reg_u64[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); + index = (offset - EIOINTC_COREMAP_START) >> 3; + old = s->coremap.reg_u64[index]; + s->coremap.reg_u64[index] = (old & ~mask) | data; + data = s->coremap.reg_u64[index]; + eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true); break; default: ret = -EINVAL; @@ -671,7 +294,7 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) { int ret = -EINVAL; - unsigned long flags; + unsigned long flags, value; struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; if (!eiointc) { @@ -679,24 +302,30 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, return -EINVAL; } - vcpu->kvm->stat.eiointc_write_exits++; + if (addr & (len - 1)) { + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + + vcpu->stat.eiointc_write_exits++; spin_lock_irqsave(&eiointc->lock, flags); switch (len) { case 1: - ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val); + value = *(unsigned char *)val; + ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF); break; case 2: - ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val); + value = *(unsigned short *)val; + ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX); break; case 4: - ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val); - break; - case 8: - ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val); + value = *(unsigned int *)val; + ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX); break; default: - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", - __func__, addr, len); + value = *(unsigned long *)val; + ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX); + break; } spin_unlock_irqrestore(&eiointc->lock, flags); @@ -787,7 +416,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, int ret = 0; unsigned long flags; unsigned long type = (unsigned long)attr->attr; - u32 i, start_irq; + u32 i, start_irq, val; void __user *data; struct loongarch_eiointc *s = dev->kvm->arch.eiointc; @@ -795,8 +424,14 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, spin_lock_irqsave(&s->lock, flags); switch (type) { case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: - if (copy_from_user(&s->num_cpu, data, 4)) + if (copy_from_user(&val, data, 4)) ret = -EFAULT; + else { + if (val >= EIOINTC_ROUTE_MAX_VCPUS) + ret = -EINVAL; + else + s->num_cpu = val; + } break; case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: if (copy_from_user(&s->features, data, 4)) @@ -809,7 +444,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, for (i = 0; i < (EIOINTC_IRQS / 4); i++) { start_irq = i * 4; eiointc_update_sw_coremap(s, start_irq, - (void *)&s->coremap.reg_u32[i], sizeof(u32), false); + s->coremap.reg_u32[i], sizeof(u32), false); } break; default: @@ -824,7 +459,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, bool is_write) { - int addr, cpuid, offset, ret = 0; + int addr, cpu, offset, ret = 0; unsigned long flags; void *p = NULL; void __user *data; @@ -832,7 +467,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, s = dev->kvm->arch.eiointc; addr = attr->attr; - cpuid = addr >> 16; + cpu = addr >> 16; addr &= 0xffff; data = (void __user *)attr->addr; switch (addr) { @@ -857,8 +492,11 @@ static int 
kvm_eiointc_regs_access(struct kvm_device *dev, p = &s->isr.reg_u32[offset]; break; case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + if (cpu >= s->num_cpu) + return -EINVAL; + offset = (addr - EIOINTC_COREISR_START) / 4; - p = &s->coreisr.reg_u32[cpuid][offset]; + p = &s->coreisr.reg_u32[cpu][offset]; break; case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: offset = (addr - EIOINTC_COREMAP_START) / 4; @@ -899,9 +537,15 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev, data = (void __user *)attr->addr; switch (addr) { case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: + if (is_write) + return ret; + p = &s->num_cpu; break; case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE: + if (is_write) + return ret; + p = &s->features; break; case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE: @@ -956,7 +600,7 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type) { int ret; struct loongarch_eiointc *s; - struct kvm_io_device *device, *device1; + struct kvm_io_device *device; struct kvm *kvm = dev->kvm; /* eiointc has been created */ @@ -984,10 +628,10 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type) return ret; } - device1 = &s->device_vext; - kvm_iodevice_init(device1, &kvm_eiointc_virt_ops); + device = &s->device_vext; + kvm_iodevice_init(device, &kvm_eiointc_virt_ops); ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, - EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1); + EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device); if (ret < 0) { kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device); kfree(s); diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index fe734dc062ed4..e658d5b37c044 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -268,36 +268,16 @@ static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { - int ret; - struct loongarch_ipi *ipi; - - ipi = vcpu->kvm->arch.ipi; - if (!ipi) { - kvm_err("%s: ipi irqchip not valid!\n", __func__); - return -EINVAL; - } - ipi->kvm->stat.ipi_read_exits++; - ret = loongarch_ipi_readl(vcpu, addr, len, val); - - return ret; + vcpu->stat.ipi_read_exits++; + return loongarch_ipi_readl(vcpu, addr, len, val); } static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { - int ret; - struct loongarch_ipi *ipi; - - ipi = vcpu->kvm->arch.ipi; - if (!ipi) { - kvm_err("%s: ipi irqchip not valid!\n", __func__); - return -EINVAL; - } - ipi->kvm->stat.ipi_write_exits++; - ret = loongarch_ipi_writel(vcpu, addr, len, val); - - return ret; + vcpu->stat.ipi_write_exits++; + return loongarch_ipi_writel(vcpu, addr, len, val); } static const struct kvm_io_device_ops kvm_ipi_ops = { diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 08fce845f6680..6f00ffe05c544 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -196,7 +196,7 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, } /* statistics of pch pic reading */ - vcpu->kvm->stat.pch_pic_read_exits++; + vcpu->stat.pch_pic_read_exits++; ret = loongarch_pch_pic_read(s, addr, len, val); return ret; @@ -303,7 +303,7 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu, } /* statistics of pch pic writing */ - vcpu->kvm->stat.pch_pic_write_exits++; + vcpu->stat.pch_pic_write_exits++; ret = loongarch_pch_pic_write(s, addr, len, val); return ret; diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c index 4c3f22de4b40a..8462083f03017 
100644 --- a/arch/loongarch/kvm/interrupt.c +++ b/arch/loongarch/kvm/interrupt.c @@ -83,28 +83,11 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu) unsigned long *pending = &vcpu->arch.irq_pending; unsigned long *pending_clr = &vcpu->arch.irq_clear; - if (!(*pending) && !(*pending_clr)) - return; - - if (*pending_clr) { - priority = __ffs(*pending_clr); - while (priority <= INT_IPI) { - kvm_irq_clear(vcpu, priority); - priority = find_next_bit(pending_clr, - BITS_PER_BYTE * sizeof(*pending_clr), - priority + 1); - } - } + for_each_set_bit(priority, pending_clr, INT_IPI + 1) + kvm_irq_clear(vcpu, priority); - if (*pending) { - priority = __ffs(*pending); - while (priority <= INT_IPI) { - kvm_irq_deliver(vcpu, priority); - priority = find_next_bit(pending, - BITS_PER_BYTE * sizeof(*pending), - priority + 1); - } - } + for_each_set_bit(priority, pending, INT_IPI + 1) + kvm_irq_deliver(vcpu, priority); } int kvm_pending_timer(struct kvm_vcpu *vcpu) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 4d203294767c5..ed956c5cf2cc0 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -912,7 +912,7 @@ out: return err; } -int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode) { int ret; @@ -921,8 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) return ret; /* Invalidate this entry in the TLB */ - vcpu->arch.flush_gpa = gpa; - kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) { + /* + * With HW PTW, invalid TLB is not added when page fault. But + * for EXCCODE_TLBM exception, stale TLB may exist because of + * the last read access. + * + * With SW PTW, invalid TLB is added in TLB refill exception. 
+ */ + vcpu->arch.flush_gpa = gpa; + kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + } return 0; } diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index 1783397b1bc88..145514dab6d5b 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -46,11 +46,15 @@ DEFINE_EVENT(kvm_transition, kvm_out, /* Further exit reasons */ #define KVM_TRACE_EXIT_IDLE 64 #define KVM_TRACE_EXIT_CACHE 65 +#define KVM_TRACE_EXIT_CPUCFG 66 +#define KVM_TRACE_EXIT_CSR 67 /* Tracepoints for VM exits */ #define kvm_trace_symbol_exit_types \ { KVM_TRACE_EXIT_IDLE, "IDLE" }, \ - { KVM_TRACE_EXIT_CACHE, "CACHE" } + { KVM_TRACE_EXIT_CACHE, "CACHE" }, \ + { KVM_TRACE_EXIT_CPUCFG, "CPUCFG" }, \ + { KVM_TRACE_EXIT_CSR, "CSR" } DECLARE_EVENT_CLASS(kvm_exit, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), @@ -82,6 +86,14 @@ DEFINE_EVENT(kvm_exit, kvm_exit_cache, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), TP_ARGS(vcpu, reason)); +DEFINE_EVENT(kvm_exit, kvm_exit_cpucfg, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit_csr, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + DEFINE_EVENT(kvm_exit, kvm_exit, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), TP_ARGS(vcpu, reason)); diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 5af32ec62cb16..d1b8c50941ca2 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -20,7 +20,13 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { STATS_DESC_COUNTER(VCPU, idle_exits), STATS_DESC_COUNTER(VCPU, cpucfg_exits), STATS_DESC_COUNTER(VCPU, signal_exits), - STATS_DESC_COUNTER(VCPU, hypercall_exits) + STATS_DESC_COUNTER(VCPU, hypercall_exits), + STATS_DESC_COUNTER(VCPU, ipi_read_exits), + STATS_DESC_COUNTER(VCPU, ipi_write_exits), + STATS_DESC_COUNTER(VCPU, eiointc_read_exits), + STATS_DESC_COUNTER(VCPU, eiointc_write_exits), + STATS_DESC_COUNTER(VCPU, pch_pic_read_exits), + STATS_DESC_COUNTER(VCPU, pch_pic_write_exits) }; const struct kvm_stats_header kvm_vcpu_stats_header = { diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile index fae77809048b8..ccea3bbd43531 100644 --- a/arch/loongarch/lib/Makefile +++ b/arch/loongarch/lib/Makefile @@ -11,5 +11,3 @@ obj-$(CONFIG_ARCH_SUPPORTS_INT128) += tishift.o obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o - -obj-$(CONFIG_CRC32_ARCH) += crc32-loongarch.o diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c deleted file mode 100644 index c44ee4f325578..0000000000000 --- a/arch/loongarch/lib/crc32-loongarch.c +++ /dev/null @@ -1,135 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * CRC32 and CRC32C using LoongArch crc* instructions - * - * Module based on mips/crypto/crc32-mips.c - * - * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org> - * Copyright (C) 2018 MIPS Tech, LLC - * Copyright (C) 2020-2023 Loongson Technology Corporation Limited - */ - -#include <asm/cpu-features.h> -#include <linux/crc32.h> -#include <linux/module.h> -#include <linux/unaligned.h> - -#define _CRC32(crc, value, size, type) \ -do { \ - __asm__ __volatile__( \ - #type ".w." 
#size ".w" " %0, %1, %0\n\t"\ - : "+r" (crc) \ - : "r" (value) \ - : "memory"); \ -} while (0) - -#define CRC32(crc, value, size) _CRC32(crc, value, size, crc) -#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc) - -static DEFINE_STATIC_KEY_FALSE(have_crc32); - -u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) -{ - if (!static_branch_likely(&have_crc32)) - return crc32_le_base(crc, p, len); - - while (len >= sizeof(u64)) { - u64 value = get_unaligned_le64(p); - - CRC32(crc, value, d); - p += sizeof(u64); - len -= sizeof(u64); - } - - if (len & sizeof(u32)) { - u32 value = get_unaligned_le32(p); - - CRC32(crc, value, w); - p += sizeof(u32); - } - - if (len & sizeof(u16)) { - u16 value = get_unaligned_le16(p); - - CRC32(crc, value, h); - p += sizeof(u16); - } - - if (len & sizeof(u8)) { - u8 value = *p++; - - CRC32(crc, value, b); - } - - return crc; -} -EXPORT_SYMBOL(crc32_le_arch); - -u32 crc32c_arch(u32 crc, const u8 *p, size_t len) -{ - if (!static_branch_likely(&have_crc32)) - return crc32c_base(crc, p, len); - - while (len >= sizeof(u64)) { - u64 value = get_unaligned_le64(p); - - CRC32C(crc, value, d); - p += sizeof(u64); - len -= sizeof(u64); - } - - if (len & sizeof(u32)) { - u32 value = get_unaligned_le32(p); - - CRC32C(crc, value, w); - p += sizeof(u32); - } - - if (len & sizeof(u16)) { - u16 value = get_unaligned_le16(p); - - CRC32C(crc, value, h); - p += sizeof(u16); - } - - if (len & sizeof(u8)) { - u8 value = *p++; - - CRC32C(crc, value, b); - } - - return crc; -} -EXPORT_SYMBOL(crc32c_arch); - -u32 crc32_be_arch(u32 crc, const u8 *p, size_t len) -{ - return crc32_be_base(crc, p, len); -} -EXPORT_SYMBOL(crc32_be_arch); - -static int __init crc32_loongarch_init(void) -{ - if (cpu_has_crc32) - static_branch_enable(&have_crc32); - return 0; -} -arch_initcall(crc32_loongarch_init); - -static void __exit crc32_loongarch_exit(void) -{ -} -module_exit(crc32_loongarch_exit); - -u32 crc32_optimizations(void) -{ - if (static_key_enabled(&have_crc32)) - return CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION; - return 0; -} -EXPORT_SYMBOL(crc32_optimizations); - -MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>"); -MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>"); -MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c index df309ae4045de..bcc9d01d8c413 100644 --- a/arch/loongarch/lib/csum.c +++ b/arch/loongarch/lib/csum.c @@ -2,6 +2,7 @@ // Copyright (C) 2019-2020 Arm Ltd. #include <linux/compiler.h> +#include <linux/export.h> #include <linux/kasan-checks.h> #include <linux/kernel.h> diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index cea84d7f2b91a..02dad4624fe32 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -47,7 +47,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd = pmd_offset(pud, addr); } } - return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd; + + return (!pmd || pmd_none(pmdp_get(pmd))) ? 
NULL : (pte_t *) pmd; } uint64_t pmd_to_entrylo(unsigned long pmd_val) diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 06f11d9e4ec11..c3e4586a79757 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -106,14 +106,6 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) page += vmem_altmap_offset(altmap); __remove_pages(start_pfn, nr_pages, altmap); } - -#ifdef CONFIG_NUMA -int memory_add_physaddr_to_nid(u64 start) -{ - return pa_to_nid(start); -} -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); -#endif #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c index 70ca730198110..df949a3d0f34a 100644 --- a/arch/loongarch/mm/ioremap.c +++ b/arch/loongarch/mm/ioremap.c @@ -16,12 +16,12 @@ void __init early_iounmap(void __iomem *addr, unsigned long size) } -void *early_memremap_ro(resource_size_t phys_addr, unsigned long size) +void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size) { return early_memremap(phys_addr, size); } -void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, +void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { return early_memremap(phys_addr, size); diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c index 99165903908a4..f5e910b68229d 100644 --- a/arch/loongarch/mm/pageattr.c +++ b/arch/loongarch/mm/pageattr.c @@ -118,7 +118,7 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgp return 0; mmap_write_lock(&init_mm); - ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); + ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks); mmap_write_unlock(&init_mm); flush_tlb_kernel_range(start, end); diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 22a94bb3e6e8b..352d9b2e02ab5 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -135,15 +135,6 @@ void kernel_pte_init(void *addr) } while (p != end); } -pmd_t mk_pmd(struct page *page, pgprot_t prot) -{ - pmd_t pmd; - - pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot); - - return pmd; -} - void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index fa1500d4aa3e3..abfdb6bb5c382 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -4,13 +4,20 @@ * * Copyright (C) 2022 Loongson Technology Corporation Limited */ +#include <linux/memory.h> #include "bpf_jit.h" -#define REG_TCC LOONGARCH_GPR_A6 -#define TCC_SAVED LOONGARCH_GPR_S5 +#define LOONGARCH_MAX_REG_ARGS 8 + +#define LOONGARCH_LONG_JUMP_NINSNS 5 +#define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4) -#define SAVE_RA BIT(0) -#define SAVE_TCC BIT(1) +#define LOONGARCH_FENTRY_NINSNS 2 +#define LOONGARCH_FENTRY_NBYTES (LOONGARCH_FENTRY_NINSNS * 4) +#define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4) + +#define REG_TCC LOONGARCH_GPR_A6 +#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80) static const int regmap[] = { /* return value from in-kernel function, and exit value for eBPF program */ @@ -32,32 +39,57 @@ static const int regmap[] = { [BPF_REG_AX] = LOONGARCH_GPR_T0, }; -static void mark_call(struct jit_ctx *ctx) +static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx, int *store_offset) { - ctx->flags |= SAVE_RA; -} + 
const struct bpf_prog *prog = ctx->prog; + const bool is_main_prog = !bpf_is_subprog(prog); -static void mark_tail_call(struct jit_ctx *ctx) -{ - ctx->flags |= SAVE_TCC; -} + if (is_main_prog) { + /* + * LOONGARCH_GPR_T3 = MAX_TAIL_CALL_CNT + * if (REG_TCC > T3 ) + * std REG_TCC -> LOONGARCH_GPR_SP + store_offset + * else + * std REG_TCC -> LOONGARCH_GPR_SP + store_offset + * REG_TCC = LOONGARCH_GPR_SP + store_offset + * + * std REG_TCC -> LOONGARCH_GPR_SP + store_offset + * + * The purpose of this code is to first push the TCC into stack, + * and then push the address of TCC into stack. + * In cases where bpf2bpf and tailcall are used in combination, + * the value in REG_TCC may be a count or an address, + * these two cases need to be judged and handled separately. + */ + emit_insn(ctx, addid, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + *store_offset -= sizeof(long); -static bool seen_call(struct jit_ctx *ctx) -{ - return (ctx->flags & SAVE_RA); -} + emit_cond_jmp(ctx, BPF_JGT, REG_TCC, LOONGARCH_GPR_T3, 4); -static bool seen_tail_call(struct jit_ctx *ctx) -{ - return (ctx->flags & SAVE_TCC); -} + /* + * If REG_TCC < MAX_TAIL_CALL_CNT, the value in REG_TCC is a count, + * push tcc into stack + */ + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); -static u8 tail_call_reg(struct jit_ctx *ctx) -{ - if (seen_call(ctx)) - return TCC_SAVED; + /* Push the address of TCC into the REG_TCC */ + emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_SP, *store_offset); + + emit_uncond_jmp(ctx, 2); + + /* + * If REG_TCC > MAX_TAIL_CALL_CNT, the value in REG_TCC is an address, + * push tcc_ptr into stack + */ + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); + } else { + *store_offset -= sizeof(long); + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); + } - return REG_TCC; + /* Push tcc_ptr into stack */ + *store_offset -= sizeof(long); + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); } /* @@ -80,6 +112,10 @@ static u8 tail_call_reg(struct jit_ctx *ctx) * | $s4 | * +-------------------------+ * | $s5 | + * +-------------------------+ + * | tcc | + * +-------------------------+ + * | tcc_ptr | * +-------------------------+ <--BPF_REG_FP * | prog->aux->stack_depth | * | (optional) | @@ -88,22 +124,32 @@ static u8 tail_call_reg(struct jit_ctx *ctx) */ static void build_prologue(struct jit_ctx *ctx) { - int stack_adjust = 0, store_offset, bpf_stack_adjust; + int i, stack_adjust = 0, store_offset, bpf_stack_adjust; + const struct bpf_prog *prog = ctx->prog; + const bool is_main_prog = !bpf_is_subprog(prog); bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); - /* To store ra, fp, s0, s1, s2, s3, s4 and s5. */ + /* To store ra, fp, s0, s1, s2, s3, s4, s5 */ stack_adjust += sizeof(long) * 8; + /* To store tcc and tcc_ptr */ + stack_adjust += sizeof(long) * 2; + stack_adjust = round_up(stack_adjust, 16); stack_adjust += bpf_stack_adjust; + /* Reserve space for the move_imm + jirl instruction */ + for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++) + emit_insn(ctx, nop); + /* - * First instruction initializes the tail call count (TCC). - * On tail call we skip this instruction, and the TCC is - * passed in REG_TCC from the caller. + * First instruction initializes the tail call count (TCC) + * register to zero. On tail call we skip this instruction, + * and the TCC is passed in REG_TCC from the caller. 
*/ - emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + if (is_main_prog) + emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, 0); emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust); @@ -131,20 +177,13 @@ static void build_prologue(struct jit_ctx *ctx) store_offset -= sizeof(long); emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset); + prepare_bpf_tail_call_cnt(ctx, &store_offset); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust); if (bpf_stack_adjust) emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust); - /* - * Program contains calls and tail calls, so REG_TCC need - * to be saved across calls. - */ - if (seen_tail_call(ctx) && seen_call(ctx)) - move_reg(ctx, TCC_SAVED, REG_TCC); - else - emit_insn(ctx, nop); - ctx->stack_size = stack_adjust; } @@ -177,6 +216,16 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) load_offset -= sizeof(long); emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset); + /* + * When push into the stack, follow the order of tcc then tcc_ptr. + * When pop from the stack, first pop tcc_ptr then followed by tcc. + */ + load_offset -= 2 * sizeof(long); + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset); + + load_offset += sizeof(long); + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust); if (!is_tail_call) { @@ -189,7 +238,7 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) * Call the next bpf prog and skip the first instruction * of TCC initialization. */ - emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1); + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 6); } } @@ -208,12 +257,10 @@ bool bpf_jit_supports_far_kfunc_call(void) return true; } -/* initialized on the first pass of build_body() */ -static int out_offset = -1; -static int emit_bpf_tail_call(struct jit_ctx *ctx) +static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn) { - int off; - u8 tcc = tail_call_reg(ctx); + int off, tc_ninsn = 0; + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size); u8 a1 = LOONGARCH_GPR_A1; u8 a2 = LOONGARCH_GPR_A2; u8 t1 = LOONGARCH_GPR_T1; @@ -222,7 +269,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) +#define jmp_offset (tc_ninsn - (cur_offset)) /* * a0: &ctx @@ -232,6 +279,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) * if (index >= array->map.max_entries) * goto out; */ + tc_ninsn = insn ? 
ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0]; off = offsetof(struct bpf_array, map.max_entries); emit_insn(ctx, ldwu, t1, a1, off); /* bgeu $a2, $t1, jmp_offset */ @@ -239,11 +287,15 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) goto toofar; /* - * if (--TCC < 0) - * goto out; + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) + * goto out; */ - emit_insn(ctx, addid, REG_TCC, tcc, -1); - if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off); + emit_insn(ctx, ldd, t3, REG_TCC, 0); + emit_insn(ctx, addid, t3, t3, 1); + emit_insn(ctx, std, t3, REG_TCC, 0); + emit_insn(ctx, addid, t2, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + if (emit_tailcall_jmp(ctx, BPF_JSGT, t3, t2, jmp_offset) < 0) goto toofar; /* @@ -263,15 +315,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_insn(ctx, ldd, t3, t2, off); __build_epilogue(ctx, true); - /* out: */ - if (out_offset == -1) - out_offset = cur_offset; - if (cur_offset != out_offset) { - pr_err_once("tail_call out_offset = %d, expected %d!\n", - cur_offset, out_offset); - return -1; - } - return 0; toofar: @@ -463,7 +506,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext u64 func_addr; bool func_addr_fixed, sign_extend; int i = insn - ctx->prog->insnsi; - int ret, jmp_offset; + int ret, jmp_offset, tcc_ptr_off; const u8 code = insn->code; const u8 cond = BPF_OP(code); const u8 t1 = LOONGARCH_GPR_T1; @@ -899,12 +942,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext /* function call */ case BPF_JMP | BPF_CALL: - mark_call(ctx); ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &func_addr, &func_addr_fixed); if (ret < 0) return ret; + if (insn->src_reg == BPF_PSEUDO_CALL) { + tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size); + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off); + } + move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); @@ -915,8 +962,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext /* tail call */ case BPF_JMP | BPF_TAIL_CALL: - mark_tail_call(ctx); - if (emit_bpf_tail_call(ctx) < 0) + if (emit_bpf_tail_call(ctx, i) < 0) return -EINVAL; break; @@ -1180,12 +1226,528 @@ static int validate_code(struct jit_ctx *ctx) return -1; } + return 0; +} + +static int validate_ctx(struct jit_ctx *ctx) +{ + if (validate_code(ctx)) + return -1; + if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries)) return -1; return 0; } +static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target) +{ + if (!target) { + pr_err("bpf_jit: jump target address is error\n"); + return -EFAULT; + } + + move_imm(ctx, LOONGARCH_GPR_T1, target, false); + emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0); + + return 0; +} + +static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call) +{ + int i; + struct jit_ctx ctx; + + ctx.idx = 0; + ctx.image = (union loongarch_instruction *)insns; + + if (!target) { + for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++) + emit_insn((&ctx), nop); + return 0; + } + + return emit_jump_and_link(&ctx, is_call ? 
LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target); +} + +static int emit_call(struct jit_ctx *ctx, u64 addr) +{ + return emit_jump_and_link(ctx, LOONGARCH_GPR_RA, addr); +} + +void *bpf_arch_text_copy(void *dst, void *src, size_t len) +{ + int ret; + + mutex_lock(&text_mutex); + ret = larch_insn_text_copy(dst, src, len); + mutex_unlock(&text_mutex); + + return ret ? ERR_PTR(-EINVAL) : dst; +} + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, + void *old_addr, void *new_addr) +{ + int ret; + bool is_call = (poke_type == BPF_MOD_CALL); + u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; + u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; + + if (!is_kernel_text((unsigned long)ip) && + !is_bpf_text_address((unsigned long)ip)) + return -ENOTSUPP; + + ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call); + if (ret) + return ret; + + if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES)) + return -EFAULT; + + ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call); + if (ret) + return ret; + + mutex_lock(&text_mutex); + if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES)) + ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES); + mutex_unlock(&text_mutex); + + return ret; +} + +int bpf_arch_text_invalidate(void *dst, size_t len) +{ + int i; + int ret = 0; + u32 *inst; + + inst = kvmalloc(len, GFP_KERNEL); + if (!inst) + return -ENOMEM; + + for (i = 0; i < (len / sizeof(u32)); i++) + inst[i] = INSN_BREAK; + + mutex_lock(&text_mutex); + if (larch_insn_text_copy(dst, inst, len)) + ret = -EINVAL; + mutex_unlock(&text_mutex); + + kvfree(inst); + + return ret; +} + +static void store_args(struct jit_ctx *ctx, int nargs, int args_off) +{ + int i; + + for (i = 0; i < nargs; i++) { + emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off); + args_off -= 8; + } +} + +static void restore_args(struct jit_ctx *ctx, int nargs, int args_off) +{ + int i; + + for (i = 0; i < nargs; i++) { + emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off); + args_off -= 8; + } +} + +static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, + int args_off, int retval_off, int run_ctx_off, bool save_ret) +{ + int ret; + u32 *branch; + struct bpf_prog *p = l->link.prog; + int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); + + if (l->cookie) { + move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false); + emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off); + } else { + emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off); + } + + /* arg1: prog */ + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false); + /* arg2: &run_ctx */ + emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off); + ret = emit_call(ctx, (const u64)bpf_trampoline_enter(p)); + if (ret) + return ret; + + /* store prog start time */ + move_reg(ctx, LOONGARCH_GPR_S1, LOONGARCH_GPR_A0); + + /* + * if (__bpf_prog_enter(prog) == 0) + * goto skip_exec_of_prog; + */ + branch = (u32 *)ctx->image + ctx->idx; + /* nop reserved for conditional jump */ + emit_insn(ctx, nop); + + /* arg1: &args_off */ + emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off); + if (!p->jited) + move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false); + ret = emit_call(ctx, (const u64)p->bpf_func); + if (ret) + return ret; + + if (save_ret) { + emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); + emit_insn(ctx, std, regmap[BPF_REG_0], 
LOONGARCH_GPR_FP, -(retval_off - 8)); + } + + /* update branch with beqz */ + if (ctx->image) { + int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch; + *branch = larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_ZERO, offset); + } + + /* arg1: prog */ + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false); + /* arg2: prog start time */ + move_reg(ctx, LOONGARCH_GPR_A1, LOONGARCH_GPR_S1); + /* arg3: &run_ctx */ + emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off); + ret = emit_call(ctx, (const u64)bpf_trampoline_exit(p)); + + return ret; +} + +static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, + int args_off, int retval_off, int run_ctx_off, u32 **branches) +{ + int i; + + emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off); + for (i = 0; i < tl->nr_links; i++) { + invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true); + emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off); + branches[i] = (u32 *)ctx->image + ctx->idx; + emit_insn(ctx, nop); + } +} + +void *arch_alloc_bpf_trampoline(unsigned int size) +{ + return bpf_prog_pack_alloc(size, jit_fill_hole); +} + +void arch_free_bpf_trampoline(void *image, unsigned int size) +{ + bpf_prog_pack_free(image, size); +} + +static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, + const struct btf_func_model *m, struct bpf_tramp_links *tlinks, + void *func_addr, u32 flags) +{ + int i, ret, save_ret; + int stack_size = 0, nargs = 0; + int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off; + bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT; + void *orig_call = func_addr; + struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; + struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; + struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; + u32 **branches = NULL; + + if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) + return -ENOTSUPP; + + /* + * FP + 8 [ RA to parent func ] return address to parent + * function + * FP + 0 [ FP of parent func ] frame pointer of parent + * function + * FP - 8 [ T0 to traced func ] return address of traced + * function + * FP - 16 [ FP of traced func ] frame pointer of traced + * function + * + * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or + * BPF_TRAMP_F_RET_FENTRY_RET + * [ argN ] + * [ ... 
] + * FP - args_off [ arg1 ] + * + * FP - nargs_off [ regs count ] + * + * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG + * + * FP - run_ctx_off [ bpf_tramp_run_ctx ] + * + * FP - sreg_off [ callee saved reg ] + * + * FP - tcc_ptr_off [ tail_call_cnt_ptr ] + */ + + if (m->nr_args > LOONGARCH_MAX_REG_ARGS) + return -ENOTSUPP; + + if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) + return -ENOTSUPP; + + stack_size = 0; + + /* Room of trampoline frame to store return address and frame pointer */ + stack_size += 16; + + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); + if (save_ret) { + /* Save BPF R0 and A0 */ + stack_size += 16; + retval_off = stack_size; + } + + /* Room of trampoline frame to store args */ + nargs = m->nr_args; + stack_size += nargs * 8; + args_off = stack_size; + + /* Room of trampoline frame to store args number */ + stack_size += 8; + nargs_off = stack_size; + + /* Room of trampoline frame to store ip address */ + if (flags & BPF_TRAMP_F_IP_ARG) { + stack_size += 8; + ip_off = stack_size; + } + + /* Room of trampoline frame to store struct bpf_tramp_run_ctx */ + stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8); + run_ctx_off = stack_size; + + stack_size += 8; + sreg_off = stack_size; + + /* Room of trampoline frame to store tail_call_cnt_ptr */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { + stack_size += 8; + tcc_ptr_off = stack_size; + } + + stack_size = round_up(stack_size, 16); + + if (is_struct_ops) { + /* + * For the trampoline called directly, just handle + * the frame of trampoline. + */ + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size); + emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size); + } else { + /* + * For the trampoline called from function entry, + * the frame of traced function and the frame of + * trampoline need to be considered. 
+ */ + /* RA and FP for parent function */ + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16); + emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 16); + + /* RA and FP for traced function */ + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size); + emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size); + } + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); + + /* callee saved register S1 to pass start time */ + emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); + + /* store ip address of the traced function */ + if (flags & BPF_TRAMP_F_IP_ARG) { + move_imm(ctx, LOONGARCH_GPR_T1, (const s64)func_addr, false); + emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off); + } + + /* store nargs number */ + move_imm(ctx, LOONGARCH_GPR_T1, nargs, false); + emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off); + + store_args(ctx, nargs, args_off); + + /* To traced function */ + /* Ftrace jump skips 2 NOP instructions */ + if (is_kernel_text((unsigned long)orig_call)) + orig_call += LOONGARCH_FENTRY_NBYTES; + /* Direct jump skips 5 NOP instructions */ + else if (is_bpf_text_address((unsigned long)orig_call)) + orig_call += LOONGARCH_BPF_FENTRY_NBYTES; + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); + ret = emit_call(ctx, (const u64)__bpf_tramp_enter); + if (ret) + return ret; + } + + for (i = 0; i < fentry->nr_links; i++) { + ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off, + run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET); + if (ret) + return ret; + } + if (fmod_ret->nr_links) { + branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL); + if (!branches) + return -ENOMEM; + + invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, run_ctx_off, branches); + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + restore_args(ctx, m->nr_args, args_off); + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); + + ret = emit_call(ctx, (const u64)orig_call); + if (ret) + goto out; + emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); + emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); + im->ip_after_call = ctx->ro_image + ctx->idx; + /* Reserve space for the move_imm + jirl instruction */ + for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++) + emit_insn(ctx, nop); + } + + for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) { + int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i]; + *branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset); + } + + for (i = 0; i < fexit->nr_links; i++) { + ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false); + if (ret) + goto out; + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + im->ip_epilogue = ctx->ro_image + ctx->idx; + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); + ret = emit_call(ctx, (const u64)__bpf_tramp_exit); + if (ret) + goto out; + } + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + restore_args(ctx, m->nr_args, args_off); + + if (save_ret) { + emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, 
-retval_off); + emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); + } + + emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); + + if (is_struct_ops) { + /* trampoline called directly */ + emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size); + + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0); + } else { + /* trampoline called from function entry */ + emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size); + + emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16); + + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* return to parent function */ + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0); + else + /* return to traced function */ + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0); + } + + ret = ctx->idx; +out: + kfree(branches); + + return ret; +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, + void *ro_image_end, const struct btf_func_model *m, + u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) +{ + int ret, size; + void *image, *tmp; + struct jit_ctx ctx; + + size = ro_image_end - ro_image; + image = kvmalloc(size, GFP_KERNEL); + if (!image) + return -ENOMEM; + + ctx.image = (union loongarch_instruction *)image; + ctx.ro_image = (union loongarch_instruction *)ro_image; + ctx.idx = 0; + + jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); + ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags); + if (ret > 0 && validate_code(&ctx) < 0) { + ret = -EINVAL; + goto out; + } + + tmp = bpf_arch_text_copy(ro_image, image, size); + if (IS_ERR(tmp)) { + ret = PTR_ERR(tmp); + goto out; + } + + bpf_flush_icache(ro_image, ro_image_end); +out: + kvfree(image); + return ret < 0 ? ret : size; +} + +int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, void *func_addr) +{ + int ret; + struct jit_ctx ctx; + struct bpf_tramp_image im; + + ctx.image = NULL; + ctx.idx = 0; + + ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags); + + /* Page align */ + return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE); +} + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { bool tmp_blinded = false, extra_pass = false; @@ -1288,7 +1850,7 @@ skip_init_ctx: build_epilogue(&ctx); /* 3. Extra pass to validate JITed code */ - if (validate_code(&ctx)) { + if (validate_ctx(&ctx)) { bpf_jit_binary_free(header); prog = orig_prog; goto out_offset; @@ -1342,7 +1904,6 @@ out: if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); - out_offset = -1; return prog; @@ -1354,6 +1915,16 @@ out_free: goto out_offset; } +bool bpf_jit_bypass_spec_v1(void) +{ + return true; +} + +bool bpf_jit_bypass_spec_v4(void) +{ + return true; +} + /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. 
*/ bool bpf_jit_supports_subprog_tailcalls(void) { diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index f9c569f539491..5697158fd1645 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -18,6 +18,7 @@ struct jit_ctx { u32 *offset; int num_exentries; union loongarch_instruction *image; + union loongarch_instruction *ro_image; u32 stack_size; }; @@ -308,3 +309,8 @@ static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch return -EINVAL; } + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 1da4dc46df43e..50c9016641a48 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -194,6 +194,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { struct pci_bus *bus; struct pci_root_info *info; + struct pci_host_bridge *host; struct acpi_pci_root_ops *root_ops; int domain = root->segment; int busnum = root->secondary.start; @@ -237,8 +238,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) return NULL; } - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); + /* If we must preserve the resource configuration, claim now */ + host = pci_find_host_bridge(bus); + if (host->preserve_config) + pci_bus_claim_resources(bus); + + /* + * Assign whatever was left unassigned. If we didn't claim above, + * this will reassign everything. + */ + pci_assign_unassigned_root_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); } diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c index 2726639150bc7..5bc9627a6cf9c 100644 --- a/arch/loongarch/pci/pci.c +++ b/arch/loongarch/pci/pci.c @@ -3,7 +3,6 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ #include <linux/kernel.h> -#include <linux/export.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/types.h> diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index ccd2c5e135c66..d8316f9934824 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -36,7 +36,7 @@ endif # VDSO linker flags. ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ - $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T + $(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T # # Shared build commands.
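
A large part of the series above reworks how the LoongArch BPF JIT tracks the tail-call count: instead of a countdown kept directly in REG_TCC, the main program now stores the counter in its own stack frame and passes a pointer to that slot, so bpf2bpf subprograms and tail calls all charge the same MAX_TAIL_CALL_CNT budget. The following is a minimal userspace C sketch of that convention, not kernel code; do_tail_call, tcc and depth are made-up names, and MAX_TAIL_CALL_CNT here simply mirrors the kernel's value of 33.

/*
 * Hypothetical sketch of the shared tail-call counter convention:
 * the main program owns the counter on its stack, and subprograms
 * and tail-call targets receive a pointer to that slot.
 */
#include <stdio.h>

#define MAX_TAIL_CALL_CNT 33	/* mirrors the kernel's limit */

/* The bounded check a tail call performs against the shared counter. */
static int do_tail_call(long *tcc_ptr)
{
	if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
		return -1;	/* fall through: "goto out" */
	return 0;		/* jump into the next program */
}

int main(void)
{
	long tcc = 0;		/* main prog: counter in its stack frame */
	long *tcc_ptr = &tcc;	/* what callees are handed via REG_TCC */
	int depth = 0;

	while (do_tail_call(tcc_ptr) == 0)
		depth++;

	printf("chained tail calls allowed: %d\n", depth);	/* 33 */
	return 0;
}

With this shape a chain of tail calls is cut off after 33 jumps regardless of how many bpf2bpf call frames sit in between, which appears to be the behaviour the BPF_TAIL_CALL_CNT_PTR_STACK_OFF plumbing in build_prologue() and emit_bpf_tail_call() is aiming for.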